1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64InstrInfo.h"
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64InstPrinter.h"
12 #include "MCTargetDesc/AArch64MCExpr.h"
13 #include "MCTargetDesc/AArch64MCTargetDesc.h"
14 #include "MCTargetDesc/AArch64TargetStreamer.h"
15 #include "TargetInfo/AArch64TargetInfo.h"
16 #include "Utils/AArch64BaseInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCLinkerOptimizationHint.h"
32 #include "llvm/MC/MCObjectFileInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
38 #include "llvm/MC/MCRegisterInfo.h"
39 #include "llvm/MC/MCStreamer.h"
40 #include "llvm/MC/MCSubtargetInfo.h"
41 #include "llvm/MC/MCSymbol.h"
42 #include "llvm/MC/MCTargetOptions.h"
43 #include "llvm/MC/MCValue.h"
44 #include "llvm/MC/SubtargetFeature.h"
45 #include "llvm/MC/TargetRegistry.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Support/Compiler.h"
48 #include "llvm/Support/ErrorHandling.h"
49 #include "llvm/Support/MathExtras.h"
50 #include "llvm/Support/SMLoc.h"
51 #include "llvm/Support/AArch64TargetParser.h"
52 #include "llvm/Support/TargetParser.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include <cassert>
55 #include <cctype>
56 #include <cstdint>
57 #include <cstdio>
58 #include <string>
59 #include <tuple>
60 #include <utility>
61 #include <vector>
62 
63 using namespace llvm;
64 
65 namespace {
66 
/// Broad classification of a parsed register name. Used when resolving
/// register names and .req aliases so a register of one kind is not
/// accepted where another kind is required.
enum class RegKind {
  Scalar,             // Scalar (non-vector) register.
  NeonVector,         // NEON vector register.
  SVEDataVector,      // SVE data vector register.
  SVEPredicateVector, // SVE predicate register.
  Matrix              // SME matrix-related register (see MatrixKind).
};
74 
/// Which flavour of matrix operand a matrix register denotes: the whole
/// array, a tile, or a tile row/column.
enum class MatrixKind { Array, Tile, Row, Col };
76 
/// How a parsed register must relate to the register the matcher expects:
/// exactly that register, or its super-/sub-register (used for operands
/// such as GPR64as32 / GPR32as64 — see RegOp::EqualityTy below).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
82 
/// Target assembly parser for AArch64: parses instructions, operands and
/// target-specific directives into MCInsts / target-streamer calls.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  // State captured from an SVE MOVPRFX instruction, held in NextPrefix
  // between instructions so the instruction that follows the prefix can be
  // checked against it.
  class PrefixInfo {
  public:
    // Build a PrefixInfo from a just-assembled instruction. For non-MOVPRFX
    // opcodes the returned info is inactive.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated movprfx: only the destination register is recorded.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // ZPmZ variants: the governing predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // ZPzZ variants: the governing predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() : Active(false), Predicated(false) {}
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    // ElementSize and Pg are only meaningful for predicated prefixes.
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active;
    bool Predicated;
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  // Location of the lexer's current token.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  // Target-specific directive parsers. Each returns true on error, per the
  // MCAsmParser convention.
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  // Windows SEH unwind directive parsers.
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked via the auto-generated matcher above.
  OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
  OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
                                              RegKind MatchKind);
  OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  template<bool AddFPZeroAsLiteral>
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
                                          bool ExpectMatch = false);
  OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
  OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  // True when targeting the ILP32 (GNUILP32) environment.
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists even for plain (non-object) streamers.
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool regsEqual(const MCParsedAsmOperand &Op1,
                 const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  // Decompose a symbolic expression into its ELF/Darwin relocation variant
  // kind and constant addend. Returns false if the expression shape is not
  // understood.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
317 
318 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
319 /// instruction.
320 class AArch64Operand : public MCParsedAsmOperand {
321 private:
  /// Discriminator for the payload union below: identifies which flavour of
  /// parsed operand this AArch64Operand instance holds.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;
342 
  // Source range of this operand within the assembly text.
  SMLoc StartLoc, EndLoc;

  // A bare token (e.g. a mnemonic suffix); Data/Length view into the source.
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount; // False when the default amount was assumed.
  };
357 
  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth; // Element width in bits, where applicable to Kind.

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
383 
  // An SME matrix register; Kind distinguishes array/tile/row/column forms.
  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth; // Element width in bits.
    MatrixKind Kind;
  };

  // A list of matrix tiles, represented as a register bitmask.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // A list of consecutive vector registers.
  struct VectorListOp {
    unsigned RegNum;  // First register of the list.
    unsigned Count;   // Number of registers in the list.
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind  RegisterKind;
  };

  // An element index applied to a vector operand (e.g. "[2]").
  struct VectorIndexOp {
    int Val;
  };
405 
  // A plain immediate expression (may be symbolic, not just a constant).
  struct ImmOp {
    const MCExpr *Val;
  };

  // An immediate together with an explicit left-shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // A condition-code operand (eq, ne, ...).
  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  // A barrier operand; Data/Length hold the textual name, if any.
  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };
430 
  // A system register operand with its encodings for the different
  // contexts in which it can appear (MRS read, MSR write, PSTATE field).
  struct SysRegOp {
    const char *Data; // Textual name (not null-terminated).
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // A system-instruction CRn/CRm immediate (c0-c15).
  struct SysCRImmOp {
    unsigned Val;
  };

  // A prefetch-operation operand; Data/Length hold the textual name.
  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // A PSB hint operand.
  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // A BTI hint operand.
  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // An SVCR (streaming vector control register) operand.
  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };
466 
  // Payload storage for the operand. Exactly one member is active at a
  // time, selected by Kind; accessors assert the match before reading.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
491 
public:
  // Construct an operand of kind K; the union payload is filled in by the
  // caller after construction.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
494 
  // Copy constructor. Because the payload lives in a union, only the member
  // selected by Kind may be copied; the switch picks it out explicitly.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
556 
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Typed accessors for the payload union. Each asserts that the operand's
  // Kind matches the member being read.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstitute the stored bit pattern as an IEEE double APFloat.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }
616 
  // Register / matrix / vector-list payload accessors; each asserts Kind.

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }
661 
  // System register / hint / prefetch payload accessors; each asserts Kind.

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // Shift/extend state may be stored either standalone (k_ShiftExtend) or
  // attached to a register operand (k_Register); these accessors handle both.

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
730 
  bool isImm() const override { return Kind == k_Immediate; }
  // No operand is classified as a bare memory operand; addresses are built
  // from separate register/immediate operands.
  bool isMem() const override { return false; }
733 
734   bool isUImm6() const {
735     if (!isImm())
736       return false;
737     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
738     if (!MCE)
739       return false;
740     int64_t Val = MCE->getValue();
741     return (Val >= 0 && Val < 64);
742   }
743 
  // A signed immediate of Width bits is a scaled signed immediate with
  // scale 1.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  // Signed/unsigned variants simply select the sign behaviour of the shared
  // isImmScaled check below.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }
753 
  // Predicate for a constant immediate that is a multiple of Scale within a
  // Bits-wide (signed or unsigned) field:
  //   signed:   [-2^(Bits-1) * Scale, (2^(Bits-1) - 1) * Scale]
  //   unsigned: [0, (2^Bits - 1) * Scale]
  // Non-constant expressions do not match at all; constants that are out of
  // range or misaligned are near-matches (for a better diagnostic).
  template <int Bits, int Scale>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if (!isImm())
      return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return DiagnosticPredicateTy::NoMatch;

    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    }

    int64_t Val = MCE->getValue();
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
779 
780   DiagnosticPredicate isSVEPattern() const {
781     if (!isImm())
782       return DiagnosticPredicateTy::NoMatch;
783     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
784     if (!MCE)
785       return DiagnosticPredicateTy::NoMatch;
786     int64_t Val = MCE->getValue();
787     if (Val >= 0 && Val < 32)
788       return DiagnosticPredicateTy::Match;
789     return DiagnosticPredicateTy::NearMatch;
790   }
791 
  // True if a symbolic expression is acceptable as a uimm12 memory offset,
  // i.e. its relocation variant produces a page-offset-style value.
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    // Whitelist of :lo12:-style relocation variants that are valid here.
    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
827 
  // True if this operand is valid as a scaled 12-bit unsigned memory offset:
  // a constant that is a non-negative multiple of Scale whose scaled value
  // fits in 12 bits, or a symbolic expression accepted by
  // isSymbolicUImm12Offset above.
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm());

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
839 
840   template <int N, int M>
841   bool isImmInRange() const {
842     if (!isImm())
843       return false;
844     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
845     if (!MCE)
846       return false;
847     int64_t Val = MCE->getValue();
848     return (Val >= N && Val <= M);
849   }
850 
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask of all bits above the width of T. Built as two half-width shifts
    // to avoid left shift by 64 directly (UB when T is 64 bits wide).
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the low sizeof(T)*8 bits against the logical-immediate encoding.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
870 
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already shifted by exactly Width: report it as-is.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Unshifted constant: fold out a Width-bit shift if the low bits are
    // zero (and the value is non-zero), otherwise report shift 0.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    // Not a constant (or not a matching shifted form).
    return {};
  }
893 
  // True if this operand is valid as an ADD/SUB immediate: a 12-bit unsigned
  // constant optionally shifted left by 12, or a symbolic expression whose
  // relocation variant yields such a value.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic expressions are accepted when their relocation variant is one
    // of the page-offset/TLS-offset kinds listed below.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
938 
939   bool isAddSubImmNeg() const {
940     if (!isShiftedImm() && !isImm())
941       return false;
942 
943     // Otherwise it should be a real negative immediate in range.
944     if (auto ShiftedVal = getShiftedVal<12>())
945       return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
946 
947     return false;
948   }
949 
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Detect 8-bit element width via either int8_t or uint8_t.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    // A "lsl #8" form is rejected for byte elements; the shift is applied in
    // the unsigned domain to avoid shifting a negative signed value.
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
970 
971   // Unsigned value in the range 0 to 255. For element widths of
972   // 16 bits or higher it may also be a signed multiple of 256 in the
973   // range 0 to 65280.
974   template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
975     if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
976       return DiagnosticPredicateTy::NoMatch;
977 
978     bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
979                   std::is_same<int8_t, T>::value;
980     if (auto ShiftedImm = getShiftedVal<8>())
981       if (!(IsByte && ShiftedImm->second) &&
982           AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
983                                         << ShiftedImm->second))
984         return DiagnosticPredicateTy::Match;
985 
986     return DiagnosticPredicateTy::NearMatch;
987   }
988 
989   template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
990     if (isLogicalImm<T>() && !isSVECpyImm<T>())
991       return DiagnosticPredicateTy::Match;
992     return DiagnosticPredicateTy::NoMatch;
993   }
994 
  // True when the operand is a parsed condition code.
  bool isCondCode() const { return Kind == k_CondCode; }

  // True when the operand is a constant encodable as an AdvSIMD modified
  // immediate of type 10.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
1005 
  // True when the operand fits an N-bit, 4-byte-aligned branch-target
  // immediate.  Non-constant expressions are accepted and left to the
  // fixup/relocation machinery.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch targets must be 4-byte aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    // Signed N-bit word offset, scaled by 4 bytes.
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1019 
1020   bool
1021   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1022     if (!isImm())
1023       return false;
1024 
1025     AArch64MCExpr::VariantKind ELFRefKind;
1026     MCSymbolRefExpr::VariantKind DarwinRefKind;
1027     int64_t Addend;
1028     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1029                                              DarwinRefKind, Addend)) {
1030       return false;
1031     }
1032     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1033       return false;
1034 
1035     return llvm::is_contained(AllowedModifiers, ELFRefKind);
1036   }
1037 
  // MOVZ/MOVK symbol-modifier predicates: each accepts the modifiers that
  // select 16-bit chunk N (G3..G0) of a symbol's value.
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1067 
  // True when the operand can be encoded via the MOVZ-based MOV alias for
  // the given register width and hw shift.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }
1082 
1083   template<int RegWidth, int Shift>
1084   bool isMOVNMovAlias() const {
1085     if (!isImm()) return false;
1086 
1087     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1088     if (!CE) return false;
1089     uint64_t Value = CE->getValue();
1090 
1091     return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1092   }
1093 
  // True when the FP constant is encodable in the 8-bit FMOV immediate form.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier operand without the nXS modifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS modifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1106 
  // System-register predicates: -1U in the corresponding field marks
  // "not valid for this use".
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PState fields whose MSR-immediate operand is restricted to 0 or 1.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  // Any other valid PState field; these take a 0-15 immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }

  // SME streaming-mode / ZA control operand (SVCR).
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1136 
  bool isReg() const override {
    return Kind == k_Register;
  }

  // General-purpose or scalar FP register (not a vector).
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the "lo" halves of the register file
  // (FPR128_lo / FPR64_lo register classes).
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1159 
  // True when the operand is an SVE register of class 'Class'; the expected
  // RegKind (data Z register vs. predicate P register) is derived from the
  // class ID.
  template <unsigned Class> bool isSVEVectorReg() const {
    RegKind RK;
    switch (Class) {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
      RK = RegKind::SVEDataVector;
      break;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
      RK = RegKind::SVEPredicateVector;
      break;
    default:
      llvm_unreachable("Unsupport register class");
    }

    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }

  // True when a scalar register is used where a register of class 'Class'
  // is expected (scalar-as-ZPR matching).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1184 
  // Predicate-register match with an element-width check; NearMatch lets the
  // matcher emit a width-specific diagnostic instead of a generic one.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // Same as above for SVE data (Z) registers.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1206 
  // SVE data-register match that also validates an attached shift/extend
  // (e.g. "z0.d, lsl #3").  The predicate tiers its result so diagnostics
  // can distinguish "wrong register" from "wrong shift amount".
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1229 
  // A 64-bit GPR written where a 32-bit one is expected (the operand is
  // remapped to its W sub-register when added; see addGPR32as64Operands).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // A 32-bit GPR written where a 64-bit one is expected.
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // First register of a GPR64x8 tuple.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // First register of a W-register pair (e.g. for CASP).
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // First register of an X-register pair.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1257 
  // Complex-number rotation operand: the constant must be congruent to
  // 'Remainder' modulo 'Angle' and no larger than 270.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1270 
  // Scalar 64-bit GPR of the given register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // 64-bit GPR carrying an "lsl #log2(ExtWidth/8)" shift, as used in scaled
  // register offsets.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1286 
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    // NumElements == 0 marks an untyped list (e.g. "{ v0, v1 }").
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  // Vector list whose register count, register kind, element width and
  // element count all match the template arguments.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }
1309 
1310   template <int Min, int Max>
1311   DiagnosticPredicate isVectorIndex() const {
1312     if (Kind != k_VectorIndex)
1313       return DiagnosticPredicateTy::NoMatch;
1314     if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1315       return DiagnosticPredicateTy::Match;
1316     return DiagnosticPredicateTy::NearMatch;
1317   }
1318 
  bool isToken() const override { return Kind == k_Token; }

  // Token operand whose spelling is exactly 'Str'.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A shift operand (as opposed to a register extend).
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1338 
  // True when the parsed FP constant is bit-for-bit equal to the exact value
  // named by 'ImmEnum' in the ExactFPImm table.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    // Only constants that parsed without rounding can match exactly.
    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1361 
  // Two-value variant: match if the constant equals either ImmA or ImmB,
  // otherwise propagate the stronger of the two sub-results.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1371 
  // A register-extend operand (uxtb/sxtb/.../lsl) with amount <= 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // 64-bit-source extends: uxtx/sxtx/lsl with amount <= 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1403 
  // 64-bit register-offset memory extend: lsl/sxtx with amount 0 or
  // log2(Width/8).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // 32-bit register-offset memory extend: uxtw/sxtw with amount 0 or
  // log2(Width/8).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1421 
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1444 
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1468 
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1498 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1508 
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
        return false;

    // Constant offsets must be page-aligned and within ADRP's signed
    // 21-bit page range (+/- 4GB in 4KB pages).
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just verify that
    // something didn't go haywire.
    if (!isImm())
        return false;

    // Constant offsets must fit ADR's signed 21-bit byte range.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1540 
1541   template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1542   DiagnosticPredicate isMatrixRegOperand() const {
1543     if (!isMatrix())
1544       return DiagnosticPredicateTy::NoMatch;
1545     if (getMatrixKind() != Kind ||
1546         !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1547         EltSize != getMatrixElementWidth())
1548       return DiagnosticPredicateTy::NearMatch;
1549     return DiagnosticPredicateTy::Match;
1550   }
1551 
  // Append 'Expr' to 'Inst', folding constants to immediate operands.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }
1561 
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1571 
  // Add a 64-bit register remapped to the 32-bit register with the same
  // encoding (e.g. X3 -> W3).
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  // Add a 32-bit register remapped to the 64-bit register with the same
  // encoding (e.g. W3 -> X3).
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1595 
  // Add a scalar FP/SIMD register remapped to the Z register with the same
  // index (e.g. D5 -> Z5), selecting the base by the scalar's width.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1610 
  // Add a Q register remapped to the D register with the same index.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1629 
  // Selects which row of the FirstRegs table below is used when remapping a
  // vector list to its tuple pseudo-register.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };

  // Add a vector list as the tuple register starting at the list's first
  // register, remapped into the D/Q/Z tuple register space.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Row: register type; column: list length (index 0 holds the parsed
    // base register used to rebase the list start).
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1658 
  // Add an SME tile list as its 8-bit register mask immediate.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1670 
  // Encode a two-valued exact FP immediate as 0/1 depending on which table
  // entry the constant matched.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1677 
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1685 
  // Add an (imm, shift) operand pair, normalising a plain constant into the
  // shifted form when its low 'Shift' bits are clear.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      // Non-constant shifted expression: emit it with the parsed shift.
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      // Non-constant unshifted expression.
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  // Negated variant: the caller guarantees (via isAddSubImmNeg) that the
  // operand is a negative constant, so only the constant path is possible.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1710 
1711   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1712     assert(N == 1 && "Invalid number of operands!");
1713     Inst.addOperand(MCOperand::createImm(getCondCode()));
1714   }
1715 
  // ADRP encodes a 4KiB-page delta, so a constant target is shifted right by
  // 12. Symbolic targets are added as-is for the fixup machinery to resolve.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }
1724 
  // ADR targets are byte-granular, so no scaling: forward to addImmOperands.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1728 
  // Emit a scaled unsigned 12-bit offset: constants are divided by Scale
  // (the access size); symbolic offsets go through unscaled as expressions.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1740 
  // Emit a 6-bit unsigned immediate. The operand is known constant here
  // (cast<> asserts), since the matcher only accepts constants for uimm6.
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }
1746 
  // Emit a constant immediate divided by the static Scale factor.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1753 
  // Emit a logical (bitmask) immediate in its encoded N:immr:imms form.
  // T fixes the register width (e.g. int32_t vs int64_t) for the encoding.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // Encode on the unsigned counterpart so sign extension can't leak in.
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1762 
  // Like addLogicalImmOperands, but encodes the bitwise complement of the
  // written value (used by aliases such as BIC with inverted immediates).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1771 
  // Emit an AdvSIMD modified-immediate (type 10) in its encoded form.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1778 
1779   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1780     // Branch operands don't encode the low bits, so shift them off
1781     // here. If it's a label, however, just put it on directly as there's
1782     // not enough information now to do anything.
1783     assert(N == 1 && "Invalid number of operands!");
1784     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1785     if (!MCE) {
1786       addExpr(Inst, getImm());
1787       return;
1788     }
1789     assert(MCE && "Invalid constant immediate operand!");
1790     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1791   }
1792 
1793   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1794     // Branch operands don't encode the low bits, so shift them off
1795     // here. If it's a label, however, just put it on directly as there's
1796     // not enough information now to do anything.
1797     assert(N == 1 && "Invalid number of operands!");
1798     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1799     if (!MCE) {
1800       addExpr(Inst, getImm());
1801       return;
1802     }
1803     assert(MCE && "Invalid constant immediate operand!");
1804     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1805   }
1806 
1807   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1808     // Branch operands don't encode the low bits, so shift them off
1809     // here. If it's a label, however, just put it on directly as there's
1810     // not enough information now to do anything.
1811     assert(N == 1 && "Invalid number of operands!");
1812     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1813     if (!MCE) {
1814       addExpr(Inst, getImm());
1815       return;
1816     }
1817     assert(MCE && "Invalid constant immediate operand!");
1818     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1819   }
1820 
  // Emit the FP immediate in the 8-bit FMOV encoding derived from its bits.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
1826 
  // Emit the barrier option value as an immediate operand.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
1831 
  // nXS-qualified barriers encode the same stored value; the distinction is
  // carried by the operand class, so this mirrors addBarrierOperands.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
1836 
  // Emit the MRS (read) encoding of the system register.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
1842 
  // Emit the MSR (write) encoding of the system register.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
1848 
  // Emit the PSTATE field encoding (variant whose immediate is 0 or 1).
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1854 
  // Emit the SME SVCR pstate-field encoding as an immediate operand.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
  }
1860 
  // Emit the PSTATE field encoding (variant whose immediate is 0..15).
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1866 
  // Emit the system control register (Cn/Cm) index as an immediate.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
1871 
  // Emit the prefetch operation (prfop) value as an immediate.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
1876 
  // Emit the PSB hint value as an immediate.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1881 
  // Emit the BTI hint value as an immediate.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1886 
  // Emit a register-shift modifier packed as (type, amount) via
  // getShifterImm.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1893 
  // Emit an arithmetic-extend modifier for a 32-bit GPR operand. A written
  // "lsl" is canonicalized to UXTW, its equivalent in the extend encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1901 
  // Emit an arithmetic-extend modifier for a 64-bit GPR operand. A written
  // "lsl" is canonicalized to UXTX, its equivalent in the extend encoding.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
1909 
  // Emit a memory-operand extend as two flags: signedness of the extend and
  // whether a non-zero shift amount was given ("DoShift").
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }
1917 
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    // Second operand: was an amount written at all (not its value)?
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
  }
1929 
  // For the MOV alias of MOVZ: emit the 16-bit chunk of the constant at the
  // given Shift position. Non-constant expressions are passed through.
  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (CE) {
      uint64_t Value = CE->getValue();
      Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
    } else {
      addExpr(Inst, getImm());
    }
  }
1942 
  // For the MOV alias of MOVN: emit the complemented 16-bit chunk at the
  // given Shift position. Only constants reach here (cast<> asserts).
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
  }
1951 
  // Complex-number rotation written as 0/90/180/270 degrees; encoded as
  // rotation / 90.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
1957 
  // Complex-number rotation written as 90/270 degrees; encoded as
  // (rotation - 90) / 180, i.e. 0 or 1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
1963 
  // Debug dump of this operand; defined out-of-line below the class.
  void print(raw_ostream &OS) const override;
1965 
  // Build a k_Token operand referencing (not copying) Str's storage; both
  // source locations collapse to S. IsSuffix marks mnemonic-suffix tokens.
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1976 
  // Build a k_Register operand with optional equality constraint and
  // shift/extend modifier. ElementWidth starts at 0; CreateVectorReg
  // overwrites it for vector kinds.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1995 
  // Build a register operand for a vector kind (NEON / SVE data / SVE
  // predicate), delegating to CreateReg and then recording ElementWidth.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2010 
  // Build a k_VectorList operand: Count consecutive registers starting at
  // RegNum, each with NumElements lanes of ElementWidth bits.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
                   MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2025 
  // Build a k_VectorIndex operand (a lane index such as v0.s[2]).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2034 
  // Build a k_MatrixTileList operand from a bitmask of SME ZA tiles.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
    Op->MatrixTileList.RegMask = RegMask;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2043 
2044   static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2045                                   const unsigned ElementWidth) {
2046     static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2047         RegMap = {
2048             {{0, AArch64::ZAB0},
2049              {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2050               AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2051             {{8, AArch64::ZAB0},
2052              {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2053               AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2054             {{16, AArch64::ZAH0},
2055              {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2056             {{16, AArch64::ZAH1},
2057              {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2058             {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2059             {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2060             {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2061             {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2062         };
2063 
2064     if (ElementWidth == 64)
2065       OutRegs.insert(Reg);
2066     else {
2067       std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2068       assert(!Regs.empty() && "Invalid tile or element width!");
2069       for (auto OutReg : Regs)
2070         OutRegs.insert(OutReg);
2071     }
2072   }
2073 
  // Build a k_Immediate operand wrapping an MCExpr (constant or symbolic).
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2082 
2083   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2084                                                           unsigned ShiftAmount,
2085                                                           SMLoc S, SMLoc E,
2086                                                           MCContext &Ctx) {
2087     auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2088     Op->ShiftedImm .Val = Val;
2089     Op->ShiftedImm.ShiftAmount = ShiftAmount;
2090     Op->StartLoc = S;
2091     Op->EndLoc = E;
2092     return Op;
2093   }
2094 
  // Build a k_CondCode operand (EQ, NE, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2103 
  // Build a k_FPImm operand, storing the float's raw bits as a signed
  // integer. IsExact records whether the written literal was representable
  // without rounding.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2113 
  // Build a k_Barrier operand; Str is the spelled option name (referenced,
  // not copied) and HasnXSModifier marks DSB ... nXS forms.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx,
                                                       bool HasnXSModifier) {
    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->Barrier.HasnXSModifier = HasnXSModifier;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2128 
  // Build a k_SysReg operand carrying the spelled name plus its MRS, MSR
  // and PSTATE-field encodings (whichever apply to this register).
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2144 
  // Build a k_SysCR operand (a Cn/Cm field of a SYS instruction).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2153 
2154   static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2155                                                         StringRef Str,
2156                                                         SMLoc S,
2157                                                         MCContext &Ctx) {
2158     auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2159     Op->Prefetch.Val = Val;
2160     Op->Barrier.Data = Str.data();
2161     Op->Barrier.Length = Str.size();
2162     Op->StartLoc = S;
2163     Op->EndLoc = S;
2164     return Op;
2165   }
2166 
  // Build a k_PSBHint operand carrying the hint value and its spelled name.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2179 
  // Build a k_BTIHint operand carrying the hint value and its spelled name.
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    // Bit 5 is OR'd into the stored value — presumably placing BTI in the
    // HINT immediate space; confirm against the hint encoding table.
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2192 
  // Build a k_MatrixRegister operand (an SME ZA register / tile).
  static std::unique_ptr<AArch64Operand>
  CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
                       SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
    Op->MatrixReg.RegNum = RegNum;
    Op->MatrixReg.ElementWidth = ElementWidth;
    Op->MatrixReg.Kind = Kind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2204 
  // Build a k_SVCR operand carrying the SME SVCR pstate field and its
  // spelled name (referenced, not copied).
  static std::unique_ptr<AArch64Operand>
  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
    Op->SVCR.PStateField = PStateField;
    Op->SVCR.Data = Str.data();
    Op->SVCR.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2215 
  // Build a standalone k_ShiftExtend operand (shift/extend type, amount,
  // and whether the amount was written explicitly).
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2227 };
2228 
2229 } // end anonymous namespace.
2230 
// Debug-dump an operand in a human-readable "<kind ...>" form. Used for
// -debug output only; the exact format is not a stable interface.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Print the 8-bit tile mask MSB-first as individual 0/1 digits.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    // A register with no shift/extend modifier prints just the register;
    // otherwise fall through to print the modifier too.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    LLVM_FALLTHROUGH;
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2324 
2325 /// @name Auto-generated Match Functions
2326 /// {
2327 
2328 static unsigned MatchRegisterName(StringRef Name);
2329 
2330 /// }
2331 
// Map a NEON vector register spelling ("v0".."v31", case-insensitive, exact
// match — no leading zeros) to the corresponding Q register, or 0 if the
// name is not a NEON vector register.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2368 
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  // {-1, -1} is the internal "not a valid suffix" sentinel.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1});
    break;
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // SVE/SME suffixes carry only an element width; element count is
    // scalable, hence always 0.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);
}
2426 
2427 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2428   return parseVectorKind(Suffix, VectorKind).hasValue();
2429 }
2430 
// Map an SVE data vector spelling ("z0".."z31", case-insensitive, exact
// match) to the corresponding Z register, or 0 if not an SVE data register.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2467 
// Map an SVE predicate spelling ("p0".."p15", case-insensitive, exact
// match) to the corresponding P register, or 0 if not a predicate register.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2488 
// Map an SME ZA tile spelling usable inside a tile list ("za0.d".."za7.d",
// "za0.s".."za3.s", "za0.h"/"za1.h", "za0.b"; case-insensitive) to its
// register, or 0 if not a valid tile-list element.
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2508 
2509 static unsigned matchMatrixRegName(StringRef Name) {
2510   return StringSwitch<unsigned>(Name.lower())
2511       .Case("za", AArch64::ZA)
2512       .Case("za0.q", AArch64::ZAQ0)
2513       .Case("za1.q", AArch64::ZAQ1)
2514       .Case("za2.q", AArch64::ZAQ2)
2515       .Case("za3.q", AArch64::ZAQ3)
2516       .Case("za4.q", AArch64::ZAQ4)
2517       .Case("za5.q", AArch64::ZAQ5)
2518       .Case("za6.q", AArch64::ZAQ6)
2519       .Case("za7.q", AArch64::ZAQ7)
2520       .Case("za8.q", AArch64::ZAQ8)
2521       .Case("za9.q", AArch64::ZAQ9)
2522       .Case("za10.q", AArch64::ZAQ10)
2523       .Case("za11.q", AArch64::ZAQ11)
2524       .Case("za12.q", AArch64::ZAQ12)
2525       .Case("za13.q", AArch64::ZAQ13)
2526       .Case("za14.q", AArch64::ZAQ14)
2527       .Case("za15.q", AArch64::ZAQ15)
2528       .Case("za0.d", AArch64::ZAD0)
2529       .Case("za1.d", AArch64::ZAD1)
2530       .Case("za2.d", AArch64::ZAD2)
2531       .Case("za3.d", AArch64::ZAD3)
2532       .Case("za4.d", AArch64::ZAD4)
2533       .Case("za5.d", AArch64::ZAD5)
2534       .Case("za6.d", AArch64::ZAD6)
2535       .Case("za7.d", AArch64::ZAD7)
2536       .Case("za0.s", AArch64::ZAS0)
2537       .Case("za1.s", AArch64::ZAS1)
2538       .Case("za2.s", AArch64::ZAS2)
2539       .Case("za3.s", AArch64::ZAS3)
2540       .Case("za0.h", AArch64::ZAH0)
2541       .Case("za1.h", AArch64::ZAH1)
2542       .Case("za0.b", AArch64::ZAB0)
2543       .Case("za0h.q", AArch64::ZAQ0)
2544       .Case("za1h.q", AArch64::ZAQ1)
2545       .Case("za2h.q", AArch64::ZAQ2)
2546       .Case("za3h.q", AArch64::ZAQ3)
2547       .Case("za4h.q", AArch64::ZAQ4)
2548       .Case("za5h.q", AArch64::ZAQ5)
2549       .Case("za6h.q", AArch64::ZAQ6)
2550       .Case("za7h.q", AArch64::ZAQ7)
2551       .Case("za8h.q", AArch64::ZAQ8)
2552       .Case("za9h.q", AArch64::ZAQ9)
2553       .Case("za10h.q", AArch64::ZAQ10)
2554       .Case("za11h.q", AArch64::ZAQ11)
2555       .Case("za12h.q", AArch64::ZAQ12)
2556       .Case("za13h.q", AArch64::ZAQ13)
2557       .Case("za14h.q", AArch64::ZAQ14)
2558       .Case("za15h.q", AArch64::ZAQ15)
2559       .Case("za0h.d", AArch64::ZAD0)
2560       .Case("za1h.d", AArch64::ZAD1)
2561       .Case("za2h.d", AArch64::ZAD2)
2562       .Case("za3h.d", AArch64::ZAD3)
2563       .Case("za4h.d", AArch64::ZAD4)
2564       .Case("za5h.d", AArch64::ZAD5)
2565       .Case("za6h.d", AArch64::ZAD6)
2566       .Case("za7h.d", AArch64::ZAD7)
2567       .Case("za0h.s", AArch64::ZAS0)
2568       .Case("za1h.s", AArch64::ZAS1)
2569       .Case("za2h.s", AArch64::ZAS2)
2570       .Case("za3h.s", AArch64::ZAS3)
2571       .Case("za0h.h", AArch64::ZAH0)
2572       .Case("za1h.h", AArch64::ZAH1)
2573       .Case("za0h.b", AArch64::ZAB0)
2574       .Case("za0v.q", AArch64::ZAQ0)
2575       .Case("za1v.q", AArch64::ZAQ1)
2576       .Case("za2v.q", AArch64::ZAQ2)
2577       .Case("za3v.q", AArch64::ZAQ3)
2578       .Case("za4v.q", AArch64::ZAQ4)
2579       .Case("za5v.q", AArch64::ZAQ5)
2580       .Case("za6v.q", AArch64::ZAQ6)
2581       .Case("za7v.q", AArch64::ZAQ7)
2582       .Case("za8v.q", AArch64::ZAQ8)
2583       .Case("za9v.q", AArch64::ZAQ9)
2584       .Case("za10v.q", AArch64::ZAQ10)
2585       .Case("za11v.q", AArch64::ZAQ11)
2586       .Case("za12v.q", AArch64::ZAQ12)
2587       .Case("za13v.q", AArch64::ZAQ13)
2588       .Case("za14v.q", AArch64::ZAQ14)
2589       .Case("za15v.q", AArch64::ZAQ15)
2590       .Case("za0v.d", AArch64::ZAD0)
2591       .Case("za1v.d", AArch64::ZAD1)
2592       .Case("za2v.d", AArch64::ZAD2)
2593       .Case("za3v.d", AArch64::ZAD3)
2594       .Case("za4v.d", AArch64::ZAD4)
2595       .Case("za5v.d", AArch64::ZAD5)
2596       .Case("za6v.d", AArch64::ZAD6)
2597       .Case("za7v.d", AArch64::ZAD7)
2598       .Case("za0v.s", AArch64::ZAS0)
2599       .Case("za1v.s", AArch64::ZAS1)
2600       .Case("za2v.s", AArch64::ZAS2)
2601       .Case("za3v.s", AArch64::ZAS3)
2602       .Case("za0v.h", AArch64::ZAH0)
2603       .Case("za1v.h", AArch64::ZAH1)
2604       .Case("za0v.b", AArch64::ZAB0)
2605       .Default(0);
2606 }
2607 
2608 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2609                                      SMLoc &EndLoc) {
2610   return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2611 }
2612 
2613 OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2614                                                         SMLoc &StartLoc,
2615                                                         SMLoc &EndLoc) {
2616   StartLoc = getLoc();
2617   auto Res = tryParseScalarRegister(RegNo);
2618   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2619   return Res;
2620 }
2621 
2622 // Matches a register name or register alias previously defined by '.req'
2623 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2624                                                   RegKind Kind) {
2625   unsigned RegNum = 0;
2626   if ((RegNum = matchSVEDataVectorRegName(Name)))
2627     return Kind == RegKind::SVEDataVector ? RegNum : 0;
2628 
2629   if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2630     return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2631 
2632   if ((RegNum = MatchNeonVectorRegName(Name)))
2633     return Kind == RegKind::NeonVector ? RegNum : 0;
2634 
2635   if ((RegNum = matchMatrixRegName(Name)))
2636     return Kind == RegKind::Matrix ? RegNum : 0;
2637 
2638   // The parsed register must be of RegKind Scalar
2639   if ((RegNum = MatchRegisterName(Name)))
2640     return Kind == RegKind::Scalar ? RegNum : 0;
2641 
2642   if (!RegNum) {
2643     // Handle a few common aliases of registers.
2644     if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2645                     .Case("fp", AArch64::FP)
2646                     .Case("lr",  AArch64::LR)
2647                     .Case("x31", AArch64::XZR)
2648                     .Case("w31", AArch64::WZR)
2649                     .Default(0))
2650       return Kind == RegKind::Scalar ? RegNum : 0;
2651 
2652     // Check for aliases registered via .req. Canonicalize to lower case.
2653     // That's more consistent since register names are case insensitive, and
2654     // it's how the original entry was passed in from MC/MCParser/AsmParser.
2655     auto Entry = RegisterReqs.find(Name.lower());
2656     if (Entry == RegisterReqs.end())
2657       return 0;
2658 
2659     // set RegNum if the match is the right kind of register
2660     if (Kind == Entry->getValue().first)
2661       RegNum = Entry->getValue().second;
2662   }
2663   return RegNum;
2664 }
2665 
2666 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2667 /// Identifier when called, and if it is a register name the token is eaten and
2668 /// the register is added to the operand list.
2669 OperandMatchResultTy
2670 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2671   const AsmToken &Tok = getTok();
2672   if (Tok.isNot(AsmToken::Identifier))
2673     return MatchOperand_NoMatch;
2674 
2675   std::string lowerCase = Tok.getString().lower();
2676   unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2677   if (Reg == 0)
2678     return MatchOperand_NoMatch;
2679 
2680   RegNum = Reg;
2681   Lex(); // Eat identifier token.
2682   return MatchOperand_Success;
2683 }
2684 
2685 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2686 OperandMatchResultTy
2687 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2688   SMLoc S = getLoc();
2689 
2690   if (getTok().isNot(AsmToken::Identifier)) {
2691     Error(S, "Expected cN operand where 0 <= N <= 15");
2692     return MatchOperand_ParseFail;
2693   }
2694 
2695   StringRef Tok = getTok().getIdentifier();
2696   if (Tok[0] != 'c' && Tok[0] != 'C') {
2697     Error(S, "Expected cN operand where 0 <= N <= 15");
2698     return MatchOperand_ParseFail;
2699   }
2700 
2701   uint32_t CRNum;
2702   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2703   if (BadNum || CRNum > 15) {
2704     Error(S, "Expected cN operand where 0 <= N <= 15");
2705     return MatchOperand_ParseFail;
2706   }
2707 
2708   Lex(); // Eat identifier token.
2709   Operands.push_back(
2710       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2711   return MatchOperand_Success;
2712 }
2713 
2714 /// tryParsePrefetch - Try to parse a prefetch operand.
2715 template <bool IsSVEPrefetch>
2716 OperandMatchResultTy
2717 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2718   SMLoc S = getLoc();
2719   const AsmToken &Tok = getTok();
2720 
2721   auto LookupByName = [](StringRef N) {
2722     if (IsSVEPrefetch) {
2723       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2724         return Optional<unsigned>(Res->Encoding);
2725     } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2726       return Optional<unsigned>(Res->Encoding);
2727     return Optional<unsigned>();
2728   };
2729 
2730   auto LookupByEncoding = [](unsigned E) {
2731     if (IsSVEPrefetch) {
2732       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2733         return Optional<StringRef>(Res->Name);
2734     } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2735       return Optional<StringRef>(Res->Name);
2736     return Optional<StringRef>();
2737   };
2738   unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2739 
2740   // Either an identifier for named values or a 5-bit immediate.
2741   // Eat optional hash.
2742   if (parseOptionalToken(AsmToken::Hash) ||
2743       Tok.is(AsmToken::Integer)) {
2744     const MCExpr *ImmVal;
2745     if (getParser().parseExpression(ImmVal))
2746       return MatchOperand_ParseFail;
2747 
2748     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2749     if (!MCE) {
2750       TokError("immediate value expected for prefetch operand");
2751       return MatchOperand_ParseFail;
2752     }
2753     unsigned prfop = MCE->getValue();
2754     if (prfop > MaxVal) {
2755       TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2756                "] expected");
2757       return MatchOperand_ParseFail;
2758     }
2759 
2760     auto PRFM = LookupByEncoding(MCE->getValue());
2761     Operands.push_back(AArch64Operand::CreatePrefetch(
2762         prfop, PRFM.getValueOr(""), S, getContext()));
2763     return MatchOperand_Success;
2764   }
2765 
2766   if (Tok.isNot(AsmToken::Identifier)) {
2767     TokError("prefetch hint expected");
2768     return MatchOperand_ParseFail;
2769   }
2770 
2771   auto PRFM = LookupByName(Tok.getString());
2772   if (!PRFM) {
2773     TokError("prefetch hint expected");
2774     return MatchOperand_ParseFail;
2775   }
2776 
2777   Operands.push_back(AArch64Operand::CreatePrefetch(
2778       *PRFM, Tok.getString(), S, getContext()));
2779   Lex(); // Eat identifier token.
2780   return MatchOperand_Success;
2781 }
2782 
2783 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2784 OperandMatchResultTy
2785 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2786   SMLoc S = getLoc();
2787   const AsmToken &Tok = getTok();
2788   if (Tok.isNot(AsmToken::Identifier)) {
2789     TokError("invalid operand for instruction");
2790     return MatchOperand_ParseFail;
2791   }
2792 
2793   auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2794   if (!PSB) {
2795     TokError("invalid operand for instruction");
2796     return MatchOperand_ParseFail;
2797   }
2798 
2799   Operands.push_back(AArch64Operand::CreatePSBHint(
2800       PSB->Encoding, Tok.getString(), S, getContext()));
2801   Lex(); // Eat identifier token.
2802   return MatchOperand_Success;
2803 }
2804 
2805 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2806 OperandMatchResultTy
2807 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2808   SMLoc S = getLoc();
2809   const AsmToken &Tok = getTok();
2810   if (Tok.isNot(AsmToken::Identifier)) {
2811     TokError("invalid operand for instruction");
2812     return MatchOperand_ParseFail;
2813   }
2814 
2815   auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2816   if (!BTI) {
2817     TokError("invalid operand for instruction");
2818     return MatchOperand_ParseFail;
2819   }
2820 
2821   Operands.push_back(AArch64Operand::CreateBTIHint(
2822       BTI->Encoding, Tok.getString(), S, getContext()));
2823   Lex(); // Eat identifier token.
2824   return MatchOperand_Success;
2825 }
2826 
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
///
/// Accepts an optional leading '#', then a symbolic immediate. A bare
/// symbol (no @modifier / :modifier:) is wrapped as an ELF @page
/// reference; otherwise only page-like Darwin/ELF modifiers are allowed.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify the expression to decide which relocation modifier (if any)
  // was written; plain constants fall through to the push_back below.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin @gotpage/@tlvppage references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2878 
2879 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2880 /// instruction.
2881 OperandMatchResultTy
2882 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2883   SMLoc S = getLoc();
2884   const MCExpr *Expr = nullptr;
2885 
2886   // Leave anything with a bracket to the default for SVE
2887   if (getTok().is(AsmToken::LBrac))
2888     return MatchOperand_NoMatch;
2889 
2890   if (getTok().is(AsmToken::Hash))
2891     Lex(); // Eat hash token.
2892 
2893   if (parseSymbolicImmVal(Expr))
2894     return MatchOperand_ParseFail;
2895 
2896   AArch64MCExpr::VariantKind ELFRefKind;
2897   MCSymbolRefExpr::VariantKind DarwinRefKind;
2898   int64_t Addend;
2899   if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2900     if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2901         ELFRefKind == AArch64MCExpr::VK_INVALID) {
2902       // No modifier was specified at all; this is the syntax for an ELF basic
2903       // ADR relocation (unfortunately).
2904       Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2905     } else {
2906       Error(S, "unexpected adr label");
2907       return MatchOperand_ParseFail;
2908     }
2909   }
2910 
2911   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2912   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2913   return MatchOperand_Success;
2914 }
2915 
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts an optional '#', an optional leading minus, and either a
/// decimal FP spelling or a "0x"-prefixed 8-bit encoded FP immediate.
/// When \p AddFPZeroAsLiteral is true, +0.0 is pushed as the two tokens
/// "#0" and ".0" rather than as an FP-immediate operand.
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may simply be some other operand kind.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The hex form is the raw 8-bit encoding; a sign or a value outside
    // [0, 255] cannot be represented.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    // Decode the 8-bit encoding back into the FP value it represents.
    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      // The second argument records whether the conversion was exact
      // (opOK). Dereferencing StatusOrErr is safe here because
      // errorToBool() above returned false.
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Lex(); // Eat the token.

  return MatchOperand_Success;
}
2970 
2971 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2972 /// a shift suffix, for example '#1, lsl #12'.
2973 OperandMatchResultTy
2974 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2975   SMLoc S = getLoc();
2976 
2977   if (getTok().is(AsmToken::Hash))
2978     Lex(); // Eat '#'
2979   else if (getTok().isNot(AsmToken::Integer))
2980     // Operand should start from # or should be integer, emit error otherwise.
2981     return MatchOperand_NoMatch;
2982 
2983   const MCExpr *Imm = nullptr;
2984   if (parseSymbolicImmVal(Imm))
2985     return MatchOperand_ParseFail;
2986   else if (getTok().isNot(AsmToken::Comma)) {
2987     Operands.push_back(
2988         AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
2989     return MatchOperand_Success;
2990   }
2991 
2992   // Eat ','
2993   Lex();
2994 
2995   // The optional operand must be "lsl #N" where N is non-negative.
2996   if (!getTok().is(AsmToken::Identifier) ||
2997       !getTok().getIdentifier().equals_insensitive("lsl")) {
2998     Error(getLoc(), "only 'lsl #+N' valid after immediate");
2999     return MatchOperand_ParseFail;
3000   }
3001 
3002   // Eat 'lsl'
3003   Lex();
3004 
3005   parseOptionalToken(AsmToken::Hash);
3006 
3007   if (getTok().isNot(AsmToken::Integer)) {
3008     Error(getLoc(), "only 'lsl #+N' valid after immediate");
3009     return MatchOperand_ParseFail;
3010   }
3011 
3012   int64_t ShiftAmount = getTok().getIntVal();
3013 
3014   if (ShiftAmount < 0) {
3015     Error(getLoc(), "positive shift amount required");
3016     return MatchOperand_ParseFail;
3017   }
3018   Lex(); // Eat the number
3019 
3020   // Just in case the optional lsl #0 is used for immediates other than zero.
3021   if (ShiftAmount == 0 && Imm != nullptr) {
3022     Operands.push_back(
3023         AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3024     return MatchOperand_Success;
3025   }
3026 
3027   Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3028                                                       getLoc(), getContext()));
3029   return MatchOperand_Success;
3030 }
3031 
3032 /// parseCondCodeString - Parse a Condition Code string.
3033 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
3034   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3035                     .Case("eq", AArch64CC::EQ)
3036                     .Case("ne", AArch64CC::NE)
3037                     .Case("cs", AArch64CC::HS)
3038                     .Case("hs", AArch64CC::HS)
3039                     .Case("cc", AArch64CC::LO)
3040                     .Case("lo", AArch64CC::LO)
3041                     .Case("mi", AArch64CC::MI)
3042                     .Case("pl", AArch64CC::PL)
3043                     .Case("vs", AArch64CC::VS)
3044                     .Case("vc", AArch64CC::VC)
3045                     .Case("hi", AArch64CC::HI)
3046                     .Case("ls", AArch64CC::LS)
3047                     .Case("ge", AArch64CC::GE)
3048                     .Case("lt", AArch64CC::LT)
3049                     .Case("gt", AArch64CC::GT)
3050                     .Case("le", AArch64CC::LE)
3051                     .Case("al", AArch64CC::AL)
3052                     .Case("nv", AArch64CC::NV)
3053                     .Default(AArch64CC::Invalid);
3054 
3055   if (CC == AArch64CC::Invalid &&
3056       getSTI().getFeatureBits()[AArch64::FeatureSVE])
3057     CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3058                     .Case("none",  AArch64CC::EQ)
3059                     .Case("any",   AArch64CC::NE)
3060                     .Case("nlast", AArch64CC::HS)
3061                     .Case("last",  AArch64CC::LO)
3062                     .Case("first", AArch64CC::MI)
3063                     .Case("nfrst", AArch64CC::PL)
3064                     .Case("pmore", AArch64CC::HI)
3065                     .Case("plast", AArch64CC::LS)
3066                     .Case("tcont", AArch64CC::GE)
3067                     .Case("tstop", AArch64CC::LT)
3068                     .Default(AArch64CC::Invalid);
3069 
3070   return CC;
3071 }
3072 
3073 /// parseCondCode - Parse a Condition Code operand.
3074 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3075                                      bool invertCondCode) {
3076   SMLoc S = getLoc();
3077   const AsmToken &Tok = getTok();
3078   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3079 
3080   StringRef Cond = Tok.getString();
3081   AArch64CC::CondCode CC = parseCondCodeString(Cond);
3082   if (CC == AArch64CC::Invalid)
3083     return TokError("invalid condition code");
3084   Lex(); // Eat identifier token.
3085 
3086   if (invertCondCode) {
3087     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3088       return TokError("condition codes AL and NV are invalid for this instruction");
3089     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3090   }
3091 
3092   Operands.push_back(
3093       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3094   return false;
3095 }
3096 
3097 OperandMatchResultTy
3098 AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3099   const AsmToken &Tok = getTok();
3100   SMLoc S = getLoc();
3101 
3102   if (Tok.isNot(AsmToken::Identifier)) {
3103     TokError("invalid operand for instruction");
3104     return MatchOperand_ParseFail;
3105   }
3106 
3107   unsigned PStateImm = -1;
3108   const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3109   if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3110     PStateImm = SVCR->Encoding;
3111 
3112   Operands.push_back(
3113       AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3114   Lex(); // Eat identifier token.
3115   return MatchOperand_Success;
3116 }
3117 
// Try to parse an SME matrix operand: either the whole ZA array, or a
// tile / tile-slice register such as "za3h.d". A following '[' starts an
// index expression that belongs to this operand, so it is parsed here.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // "za" on its own names the whole array (element width 0).
  if (Name.equals_insensitive("za")) {
    Lex(); // eat "za"
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      // NOTE(review): a failure of this recursive parse yields NoMatch
      // rather than ParseFail — confirm that is intended.
      if (parseOperand(Operands, false, false))
        return MatchOperand_NoMatch;
    }
    return MatchOperand_Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return MatchOperand_NoMatch;

  // Every matrix register name other than "za" contains a '.' suffix.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();

  // A trailing 'h'/'v' before the dot selects a row/column slice;
  // otherwise this is a whole tile.
  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes) {
    TokError("Expected the register to be followed by element width suffix");
    return MatchOperand_ParseFail;
  }
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    // NOTE(review): as above, failure maps to NoMatch — confirm intended.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }
  return MatchOperand_Success;
}
3177 
3178 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3179 /// them if present.
3180 OperandMatchResultTy
3181 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3182   const AsmToken &Tok = getTok();
3183   std::string LowerID = Tok.getString().lower();
3184   AArch64_AM::ShiftExtendType ShOp =
3185       StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3186           .Case("lsl", AArch64_AM::LSL)
3187           .Case("lsr", AArch64_AM::LSR)
3188           .Case("asr", AArch64_AM::ASR)
3189           .Case("ror", AArch64_AM::ROR)
3190           .Case("msl", AArch64_AM::MSL)
3191           .Case("uxtb", AArch64_AM::UXTB)
3192           .Case("uxth", AArch64_AM::UXTH)
3193           .Case("uxtw", AArch64_AM::UXTW)
3194           .Case("uxtx", AArch64_AM::UXTX)
3195           .Case("sxtb", AArch64_AM::SXTB)
3196           .Case("sxth", AArch64_AM::SXTH)
3197           .Case("sxtw", AArch64_AM::SXTW)
3198           .Case("sxtx", AArch64_AM::SXTX)
3199           .Default(AArch64_AM::InvalidShiftExtend);
3200 
3201   if (ShOp == AArch64_AM::InvalidShiftExtend)
3202     return MatchOperand_NoMatch;
3203 
3204   SMLoc S = Tok.getLoc();
3205   Lex();
3206 
3207   bool Hash = parseOptionalToken(AsmToken::Hash);
3208 
3209   if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3210     if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3211         ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3212         ShOp == AArch64_AM::MSL) {
3213       // We expect a number here.
3214       TokError("expected #imm after shift specifier");
3215       return MatchOperand_ParseFail;
3216     }
3217 
3218     // "extend" type operations don't need an immediate, #0 is implicit.
3219     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3220     Operands.push_back(
3221         AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3222     return MatchOperand_Success;
3223   }
3224 
3225   // Make sure we do actually have a number, identifier or a parenthesized
3226   // expression.
3227   SMLoc E = getLoc();
3228   if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3229       !getTok().is(AsmToken::Identifier)) {
3230     Error(E, "expected integer shift amount");
3231     return MatchOperand_ParseFail;
3232   }
3233 
3234   const MCExpr *ImmVal;
3235   if (getParser().parseExpression(ImmVal))
3236     return MatchOperand_ParseFail;
3237 
3238   const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3239   if (!MCE) {
3240     Error(E, "expected constant '#imm' after shift specifier");
3241     return MatchOperand_ParseFail;
3242   }
3243 
3244   E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3245   Operands.push_back(AArch64Operand::CreateShiftExtend(
3246       ShOp, MCE->getValue(), true, S, E, getContext()));
3247   return MatchOperand_Success;
3248 }
3249 
// Table mapping architecture-extension names to the subtarget features
// they correspond to. Scanned linearly (e.g. by setRequiredFeatureString
// below, whose output follows this table's order); "mte" and "memtag" are
// deliberate synonyms for the same feature.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64", {AArch64::FeatureSMEF64}},
    {"sme-i64", {AArch64::FeatureSMEI64}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
3295 
// Append a human-readable description of the features in FBS to Str, for
// use in diagnostics: an architecture-version name when one of the arch
// bits is set, otherwise the matching ExtensionMap names joined with
// ", ", or "(unknown)" when nothing matches.
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  // NOTE(review): the v8.0a test is a standalone 'if', not part of the
  // else-if chain below, so "ARMv8a" can be emitted in addition to a
  // later version string — confirm this is intended.
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    // No architecture bit matched: name the individual extensions instead.
    SmallVector<std::string, 2> ExtMatches;
    for (const auto& Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
  }
}
3335 
3336 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3337                                       SMLoc S) {
3338   const uint16_t Op2 = Encoding & 7;
3339   const uint16_t Cm = (Encoding & 0x78) >> 3;
3340   const uint16_t Cn = (Encoding & 0x780) >> 7;
3341   const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3342 
3343   const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3344 
3345   Operands.push_back(
3346       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3347   Operands.push_back(
3348       AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3349   Operands.push_back(
3350       AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3351   Expr = MCConstantExpr::create(Op2, getContext());
3352   Operands.push_back(
3353       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3354 }
3355 
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Returns true (and emits a diagnostic) on failure.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  // None of these alias mnemonics contain a '.'.
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias lowers to SYS, so the operand list starts with a "sys" token.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // For each alias: look up the named operation, verify the required
  // subtarget features, then expand its encoding into SYS operands.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    // Prediction-restriction instructions share one lookup table; the
    // mnemonic itself selects op2.
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Lex(); // Eat operand.

  // Operations whose name contains "all" take no register operand; all
  // others require one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  // Diagnose a mismatch between the operation's register expectation and
  // what was actually written.
  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3451 
// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either an
// immediate in [0, 15] or a named barrier option. For DSB, out-of-range
// immediates and unknown names return NoMatch so the nXS variant parser can
// have a go at them.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  // TSB accepts only a named option, so a non-identifier is a hard error.
  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back for the nXS
    // re-parse below.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Attach the canonical option name for this encoding, if one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Named option: try both the TSB and DB tables.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return MatchOperand_Success;
}
3524 
3525 OperandMatchResultTy
3526 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3527   const AsmToken &Tok = getTok();
3528 
3529   assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3530   if (Mnemonic != "dsb")
3531     return MatchOperand_ParseFail;
3532 
3533   if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3534     // Immediate operand.
3535     const MCExpr *ImmVal;
3536     SMLoc ExprLoc = getLoc();
3537     if (getParser().parseExpression(ImmVal))
3538       return MatchOperand_ParseFail;
3539     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3540     if (!MCE) {
3541       Error(ExprLoc, "immediate value expected for barrier operand");
3542       return MatchOperand_ParseFail;
3543     }
3544     int64_t Value = MCE->getValue();
3545     // v8.7-A DSB in the nXS variant accepts only the following immediate
3546     // values: 16, 20, 24, 28.
3547     if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
3548       Error(ExprLoc, "barrier operand out of range");
3549       return MatchOperand_ParseFail;
3550     }
3551     auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3552     Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3553                                                      ExprLoc, getContext(),
3554                                                      true /*hasnXSModifier*/));
3555     return MatchOperand_Success;
3556   }
3557 
3558   if (Tok.isNot(AsmToken::Identifier)) {
3559     TokError("invalid operand for instruction");
3560     return MatchOperand_ParseFail;
3561   }
3562 
3563   StringRef Operand = Tok.getString();
3564   auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3565 
3566   if (!DB) {
3567     TokError("invalid barrier option name");
3568     return MatchOperand_ParseFail;
3569   }
3570 
3571   Operands.push_back(
3572       AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3573                                     getContext(), true /*hasnXSModifier*/));
3574   Lex(); // Consume the option
3575 
3576   return MatchOperand_Success;
3577 }
3578 
3579 OperandMatchResultTy
3580 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3581   const AsmToken &Tok = getTok();
3582 
3583   if (Tok.isNot(AsmToken::Identifier))
3584     return MatchOperand_NoMatch;
3585 
3586   if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3587     return MatchOperand_NoMatch;
3588 
3589   int MRSReg, MSRReg;
3590   auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3591   if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3592     MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3593     MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3594   } else
3595     MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3596 
3597   auto PState = AArch64PState::lookupPStateByName(Tok.getString());
3598   unsigned PStateImm = -1;
3599   if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3600     PStateImm = PState->Encoding;
3601 
3602   Operands.push_back(
3603       AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3604                                    PStateImm, getContext()));
3605   Lex(); // Eat identifier
3606 
3607   return MatchOperand_Success;
3608 }
3609 
3610 /// tryParseNeonVectorRegister - Parse a vector register operand.
3611 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
3612   if (getTok().isNot(AsmToken::Identifier))
3613     return true;
3614 
3615   SMLoc S = getLoc();
3616   // Check for a vector register specifier first.
3617   StringRef Kind;
3618   unsigned Reg;
3619   OperandMatchResultTy Res =
3620       tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3621   if (Res != MatchOperand_Success)
3622     return true;
3623 
3624   const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
3625   if (!KindRes)
3626     return true;
3627 
3628   unsigned ElementWidth = KindRes->second;
3629   Operands.push_back(
3630       AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3631                                       S, getLoc(), getContext()));
3632 
3633   // If there was an explicit qualifier, that goes on as a literal text
3634   // operand.
3635   if (!Kind.empty())
3636     Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
3637 
3638   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
3639 }
3640 
3641 OperandMatchResultTy
3642 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3643   SMLoc SIdx = getLoc();
3644   if (parseOptionalToken(AsmToken::LBrac)) {
3645     const MCExpr *ImmVal;
3646     if (getParser().parseExpression(ImmVal))
3647       return MatchOperand_NoMatch;
3648     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3649     if (!MCE) {
3650       TokError("immediate value expected for vector index");
3651       return MatchOperand_ParseFail;;
3652     }
3653 
3654     SMLoc E = getLoc();
3655 
3656     if (parseToken(AsmToken::RBrac, "']' expected"))
3657       return MatchOperand_ParseFail;;
3658 
3659     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3660                                                          E, getContext()));
3661     return MatchOperand_Success;
3662   }
3663 
3664   return MatchOperand_NoMatch;
3665 }
3666 
3667 // tryParseVectorRegister - Try to parse a vector register name with
3668 // optional kind specifier. If it is a register specifier, eat the token
3669 // and return it.
3670 OperandMatchResultTy
3671 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3672                                          RegKind MatchKind) {
3673   const AsmToken &Tok = getTok();
3674 
3675   if (Tok.isNot(AsmToken::Identifier))
3676     return MatchOperand_NoMatch;
3677 
3678   StringRef Name = Tok.getString();
3679   // If there is a kind specifier, it's separated from the register name by
3680   // a '.'.
3681   size_t Start = 0, Next = Name.find('.');
3682   StringRef Head = Name.slice(Start, Next);
3683   unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3684 
3685   if (RegNum) {
3686     if (Next != StringRef::npos) {
3687       Kind = Name.slice(Next, StringRef::npos);
3688       if (!isValidVectorKind(Kind, MatchKind)) {
3689         TokError("invalid vector kind qualifier");
3690         return MatchOperand_ParseFail;
3691       }
3692     }
3693     Lex(); // Eat the register token.
3694 
3695     Reg = RegNum;
3696     return MatchOperand_Success;
3697   }
3698 
3699   return MatchOperand_NoMatch;
3700 }
3701 
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by an index ("[...]") or a "/m" / "/z" predication
/// qualifier (which is emitted as separate token operands).
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  // The (possibly empty) suffix must denote a valid predicate element kind.
  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // Indexed predicate, there's no comma so try parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3758 
3759 /// parseRegister - Parse a register operand.
3760 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3761   // Try for a Neon vector register.
3762   if (!tryParseNeonVectorRegister(Operands))
3763     return false;
3764 
3765   // Otherwise try for a scalar register.
3766   if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3767     return false;
3768 
3769   return true;
3770 }
3771 
// Parse an immediate expression that may carry a leading ELF relocation
// specifier of the form ":spec:expr" (e.g. ":lo12:sym"). On success the
// parsed expression — wrapped in an AArch64MCExpr when a specifier was
// present — is returned through ImmVal. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  // A leading ':' introduces a relocation specifier.
  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    // The specifier is terminated by a second ':'.
    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Record the relocation kind by wrapping the expression.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3849 
// Parse an SME matrix tile list operand of the form "{}", "{za}", or
// "{<tile>.<kind>[, ...]}". The result is emitted as a bitmask over the
// ZAD0-relative encodings of the tiles' aliasing d-registers.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Parse one "<tile>.<kind>" element, returning its register and element
  // width. A missing '.' suffix or unknown tile name is a NoMatch.
  auto ParseMatrixTile = [this](unsigned &Reg, unsigned &ElementWidth) {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const Optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  // Keep the '{' token so it can be pushed back if the list turns out not to
  // be a matrix tile list.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    // "za" selects every tile.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    // Put the '{' back so other list-operand parsers can try.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;
  unsigned Count = 1;

  // DRegs accumulates the d-registers aliased by every tile seen so far.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Out-of-order and duplicate tiles only warn; the list is still accepted.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
    ++Count;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Fold the aliased d-registers into a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
3960 
// Parse a vector register list of the given kind: either a range
// "{v0.8b-v3.8b}" or an explicit list "{v0.8b, v1.8b, ...}" of up to four
// sequential registers. When \p ExpectMatch is true, a non-register inside
// the braces is diagnosed as an error rather than reported as NoMatch.
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "za"-prefixed identifiers are left for other parsers even when a
    // NoMatch would otherwise be an error.
    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  // Keep the '{' so it can be pushed back on NoMatch.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // Range form: "{first-last}".
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Distance from first to last register, with wraparound at 32.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Explicit list form: "{a, b, c, ...}".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffixes must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  // Resolve the (possibly empty) kind suffix to element count/width.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
4082 
4083 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4084 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4085   auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4086   if (ParseRes != MatchOperand_Success)
4087     return true;
4088 
4089   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4090 }
4091 
4092 OperandMatchResultTy
4093 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4094   SMLoc StartLoc = getLoc();
4095 
4096   unsigned RegNum;
4097   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4098   if (Res != MatchOperand_Success)
4099     return Res;
4100 
4101   if (!parseOptionalToken(AsmToken::Comma)) {
4102     Operands.push_back(AArch64Operand::CreateReg(
4103         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4104     return MatchOperand_Success;
4105   }
4106 
4107   parseOptionalToken(AsmToken::Hash);
4108 
4109   if (getTok().isNot(AsmToken::Integer)) {
4110     Error(getLoc(), "index must be absent or #0");
4111     return MatchOperand_ParseFail;
4112   }
4113 
4114   const MCExpr *ImmVal;
4115   if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4116       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4117     Error(getLoc(), "index must be absent or #0");
4118     return MatchOperand_ParseFail;
4119   }
4120 
4121   Operands.push_back(AArch64Operand::CreateReg(
4122       RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4123   return MatchOperand_Success;
4124 }
4125 
4126 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4127 OperandMatchResultTy
4128 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4129   SMLoc StartLoc = getLoc();
4130 
4131   unsigned RegNum;
4132   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4133   if (Res != MatchOperand_Success)
4134     return Res;
4135 
4136   // No shift/extend is the default.
4137   if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4138     Operands.push_back(AArch64Operand::CreateReg(
4139         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4140     return MatchOperand_Success;
4141   }
4142 
4143   // Eat the comma
4144   Lex();
4145 
4146   // Match the shift
4147   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4148   Res = tryParseOptionalShiftExtend(ExtOpnd);
4149   if (Res != MatchOperand_Success)
4150     return Res;
4151 
4152   auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4153   Operands.push_back(AArch64Operand::CreateReg(
4154       RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4155       Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4156       Ext->hasShiftExtendAmount()));
4157 
4158   return MatchOperand_Success;
4159 }
4160 
4161 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4162   MCAsmParser &Parser = getParser();
4163 
4164   // Some SVE instructions have a decoration after the immediate, i.e.
4165   // "mul vl". We parse them here and add tokens, which must be present in the
4166   // asm string in the tablegen instruction.
4167   bool NextIsVL =
4168       Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4169   bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4170   if (!getTok().getString().equals_insensitive("mul") ||
4171       !(NextIsVL || NextIsHash))
4172     return true;
4173 
4174   Operands.push_back(
4175       AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4176   Lex(); // Eat the "mul"
4177 
4178   if (NextIsVL) {
4179     Operands.push_back(
4180         AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4181     Lex(); // Eat the "vl"
4182     return false;
4183   }
4184 
4185   if (NextIsHash) {
4186     Lex(); // Eat the #
4187     SMLoc S = getLoc();
4188 
4189     // Parse immediate operand.
4190     const MCExpr *ImmVal;
4191     if (!Parser.parseExpression(ImmVal))
4192       if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4193         Operands.push_back(AArch64Operand::CreateImm(
4194             MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4195             getContext()));
4196         return MatchOperand_Success;
4197       }
4198   }
4199 
4200   return Error(getLoc(), "expected 'vl' or '#<imm>'");
4201 }
4202 
4203 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4204   auto Tok = getTok();
4205   if (Tok.isNot(AsmToken::Identifier))
4206     return true;
4207 
4208   auto Keyword = Tok.getString();
4209   Keyword = StringSwitch<StringRef>(Keyword.lower())
4210                 .Case("sm", "sm")
4211                 .Case("za", "za")
4212                 .Default(Keyword);
4213   Operands.push_back(
4214       AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4215 
4216   Lex();
4217   return false;
4218 }
4219 
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
///
/// \param Operands       vector the parsed operand(s) are appended to.
/// \param isCondCode     the caller expects a condition-code operand here.
/// \param invertCondCode the parsed condition code should be inverted
///                       (forwarded to parseCondCode for csel-family aliases).
/// \returns true on failure (a diagnostic has been emitted), false otherwise.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First give the tablegen-generated custom operand parsers a chance.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Any other token kind: try to parse a symbolic immediate expression.
    // NOTE(review): these locals shadow the outer S/E declared above.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    // Start of a memory operand: push the '[' token and recurse for the
    // address components.
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // First try a NEON vector list; if that consumed the braces we're done.
    if (!parseNeonVectorList(Operands))
      return false;

    // Otherwise treat '{' as a plain token (e.g. SME ZA tile groups).
    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is an "smstart" or "smstop" instruction, parse its special
    // keyword operand as an identifier.
    if (Mnemonic == "smstart" || Mnemonic == "smstop")
      return parseKeywordOperand(Operands);

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // NOTE: relies on the enum-to-bool conversion — Success (0) becomes
    // false (keep going in caller), ParseFail becomes true (error).
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // Only the fcmp/fcmeq family accepts a floating literal, and only
      // exactly +0.0 at that.
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Emit as two raw tokens, matching the asm strings in tablegen.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr <reg>, =<expr>" pseudo: materialize via movz when the constant
    // fits, otherwise place the value in the constant pool.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Strip trailing 16-bit zero chunks, tracking the implied LSL amount
      // (W regs allow one shift step, X regs up to three).
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // Rewrite the mnemonic operand itself to "movz".
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4408 
4409 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4410   const MCExpr *Expr = nullptr;
4411   SMLoc L = getLoc();
4412   if (check(getParser().parseExpression(Expr), L, "expected expression"))
4413     return true;
4414   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4415   if (check(!Value, L, "expected constant expression"))
4416     return true;
4417   Out = Value->getValue();
4418   return false;
4419 }
4420 
4421 bool AArch64AsmParser::parseComma() {
4422   if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4423     return true;
4424   // Eat the comma
4425   Lex();
4426   return false;
4427 }
4428 
4429 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4430                                             unsigned First, unsigned Last) {
4431   unsigned Reg;
4432   SMLoc Start, End;
4433   if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
4434     return true;
4435 
4436   // Special handling for FP and LR; they aren't linearly after x28 in
4437   // the registers enum.
4438   unsigned RangeEnd = Last;
4439   if (Base == AArch64::X0) {
4440     if (Last == AArch64::FP) {
4441       RangeEnd = AArch64::X28;
4442       if (Reg == AArch64::FP) {
4443         Out = 29;
4444         return false;
4445       }
4446     }
4447     if (Last == AArch64::LR) {
4448       RangeEnd = AArch64::X28;
4449       if (Reg == AArch64::FP) {
4450         Out = 29;
4451         return false;
4452       } else if (Reg == AArch64::LR) {
4453         Out = 30;
4454         return false;
4455       }
4456     }
4457   }
4458 
4459   if (check(Reg < First || Reg > RangeEnd, Start,
4460             Twine("expected register in range ") +
4461                 AArch64InstPrinter::getRegisterName(First) + " to " +
4462                 AArch64InstPrinter::getRegisterName(Last)))
4463     return true;
4464   Out = Reg - Base;
4465   return false;
4466 }
4467 
4468 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4469                                  const MCParsedAsmOperand &Op2) const {
4470   auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4471   auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4472   if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4473       AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4474     return MCTargetAsmParser::regsEqual(Op1, Op2);
4475 
4476   assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4477          "Testing equality of non-scalar registers not supported");
4478 
4479   // Check if a registers match their sub/super register classes.
4480   if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4481     return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4482   if (AOp1.getRegEqualityTy() == EqualsSubReg)
4483     return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4484   if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4485     return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4486   if (AOp2.getRegEqualityTy() == EqualsSubReg)
4487     return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4488 
4489   return false;
4490 }
4491 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// \param Name    the (already-lexed) mnemonic text.
/// \param NameLoc source location of the mnemonic.
/// \param Operands receives the mnemonic token(s) and all parsed operands.
/// \returns true on failure (a diagnostic has been emitted).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the one-word conditional-branch shorthands ("beq") into
  // the dotted form ("b.eq") before splitting on '.' below.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Locate the suffix within the original mnemonic for diagnostics.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    // The '.' separator is kept as a suffix token so the matcher sees the
    // full "b.<cc>" asm string.
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. N tracks the 1-based operand
      // position so the condition-code flags above can target the right slot.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4631 
4632 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4633   assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4634   return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4635          (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4636          (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4637          (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4638          (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4639          (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4640 }
4641 
4642 // FIXME: This entire function is a giant hack to provide us with decent
4643 // operand range validation/diagnostics until TableGen/MC can be extended
4644 // to support autogeneration of this kind of validation.
4645 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4646                                            SmallVectorImpl<SMLoc> &Loc) {
4647   const MCRegisterInfo *RI = getContext().getRegisterInfo();
4648   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4649 
4650   // A prefix only applies to the instruction following it.  Here we extract
4651   // prefix information for the next instruction before validating the current
4652   // one so that in the case of failure we don't erronously continue using the
4653   // current prefix.
4654   PrefixInfo Prefix = NextPrefix;
4655   NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4656 
4657   // Before validating the instruction in isolation we run through the rules
4658   // applicable when it follows a prefix instruction.
4659   // NOTE: brk & hlt can be prefixed but require no additional validation.
4660   if (Prefix.isActive() &&
4661       (Inst.getOpcode() != AArch64::BRK) &&
4662       (Inst.getOpcode() != AArch64::HLT)) {
4663 
4664     // Prefixed intructions must have a destructive operand.
4665     if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4666         AArch64::NotDestructive)
4667       return Error(IDLoc, "instruction is unpredictable when following a"
4668                    " movprfx, suggest replacing movprfx with mov");
4669 
4670     // Destination operands must match.
4671     if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4672       return Error(Loc[0], "instruction is unpredictable when following a"
4673                    " movprfx writing to a different destination");
4674 
4675     // Destination operand must not be used in any other location.
4676     for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4677       if (Inst.getOperand(i).isReg() &&
4678           (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4679           isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4680         return Error(Loc[0], "instruction is unpredictable when following a"
4681                      " movprfx and destination also used as non-destructive"
4682                      " source");
4683     }
4684 
4685     auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4686     if (Prefix.isPredicated()) {
4687       int PgIdx = -1;
4688 
4689       // Find the instructions general predicate.
4690       for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4691         if (Inst.getOperand(i).isReg() &&
4692             PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4693           PgIdx = i;
4694           break;
4695         }
4696 
4697       // Instruction must be predicated if the movprfx is predicated.
4698       if (PgIdx == -1 ||
4699           (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4700         return Error(IDLoc, "instruction is unpredictable when following a"
4701                      " predicated movprfx, suggest using unpredicated movprfx");
4702 
4703       // Instruction must use same general predicate as the movprfx.
4704       if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4705         return Error(IDLoc, "instruction is unpredictable when following a"
4706                      " predicated movprfx using a different general predicate");
4707 
4708       // Instruction element type must match the movprfx.
4709       if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4710         return Error(IDLoc, "instruction is unpredictable when following a"
4711                      " predicated movprfx with a different element size");
4712     }
4713   }
4714 
4715   // Check for indexed addressing modes w/ the base register being the
4716   // same as a destination/source register or pair load where
4717   // the Rt == Rt2. All of those are undefined behaviour.
4718   switch (Inst.getOpcode()) {
4719   case AArch64::LDPSWpre:
4720   case AArch64::LDPWpost:
4721   case AArch64::LDPWpre:
4722   case AArch64::LDPXpost:
4723   case AArch64::LDPXpre: {
4724     unsigned Rt = Inst.getOperand(1).getReg();
4725     unsigned Rt2 = Inst.getOperand(2).getReg();
4726     unsigned Rn = Inst.getOperand(3).getReg();
4727     if (RI->isSubRegisterEq(Rn, Rt))
4728       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4729                            "is also a destination");
4730     if (RI->isSubRegisterEq(Rn, Rt2))
4731       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4732                            "is also a destination");
4733     LLVM_FALLTHROUGH;
4734   }
4735   case AArch64::LDPDi:
4736   case AArch64::LDPQi:
4737   case AArch64::LDPSi:
4738   case AArch64::LDPSWi:
4739   case AArch64::LDPWi:
4740   case AArch64::LDPXi: {
4741     unsigned Rt = Inst.getOperand(0).getReg();
4742     unsigned Rt2 = Inst.getOperand(1).getReg();
4743     if (Rt == Rt2)
4744       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4745     break;
4746   }
4747   case AArch64::LDPDpost:
4748   case AArch64::LDPDpre:
4749   case AArch64::LDPQpost:
4750   case AArch64::LDPQpre:
4751   case AArch64::LDPSpost:
4752   case AArch64::LDPSpre:
4753   case AArch64::LDPSWpost: {
4754     unsigned Rt = Inst.getOperand(1).getReg();
4755     unsigned Rt2 = Inst.getOperand(2).getReg();
4756     if (Rt == Rt2)
4757       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4758     break;
4759   }
4760   case AArch64::STPDpost:
4761   case AArch64::STPDpre:
4762   case AArch64::STPQpost:
4763   case AArch64::STPQpre:
4764   case AArch64::STPSpost:
4765   case AArch64::STPSpre:
4766   case AArch64::STPWpost:
4767   case AArch64::STPWpre:
4768   case AArch64::STPXpost:
4769   case AArch64::STPXpre: {
4770     unsigned Rt = Inst.getOperand(1).getReg();
4771     unsigned Rt2 = Inst.getOperand(2).getReg();
4772     unsigned Rn = Inst.getOperand(3).getReg();
4773     if (RI->isSubRegisterEq(Rn, Rt))
4774       return Error(Loc[0], "unpredictable STP instruction, writeback base "
4775                            "is also a source");
4776     if (RI->isSubRegisterEq(Rn, Rt2))
4777       return Error(Loc[1], "unpredictable STP instruction, writeback base "
4778                            "is also a source");
4779     break;
4780   }
4781   case AArch64::LDRBBpre:
4782   case AArch64::LDRBpre:
4783   case AArch64::LDRHHpre:
4784   case AArch64::LDRHpre:
4785   case AArch64::LDRSBWpre:
4786   case AArch64::LDRSBXpre:
4787   case AArch64::LDRSHWpre:
4788   case AArch64::LDRSHXpre:
4789   case AArch64::LDRSWpre:
4790   case AArch64::LDRWpre:
4791   case AArch64::LDRXpre:
4792   case AArch64::LDRBBpost:
4793   case AArch64::LDRBpost:
4794   case AArch64::LDRHHpost:
4795   case AArch64::LDRHpost:
4796   case AArch64::LDRSBWpost:
4797   case AArch64::LDRSBXpost:
4798   case AArch64::LDRSHWpost:
4799   case AArch64::LDRSHXpost:
4800   case AArch64::LDRSWpost:
4801   case AArch64::LDRWpost:
4802   case AArch64::LDRXpost: {
4803     unsigned Rt = Inst.getOperand(1).getReg();
4804     unsigned Rn = Inst.getOperand(2).getReg();
4805     if (RI->isSubRegisterEq(Rn, Rt))
4806       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4807                            "is also a source");
4808     break;
4809   }
4810   case AArch64::STRBBpost:
4811   case AArch64::STRBpost:
4812   case AArch64::STRHHpost:
4813   case AArch64::STRHpost:
4814   case AArch64::STRWpost:
4815   case AArch64::STRXpost:
4816   case AArch64::STRBBpre:
4817   case AArch64::STRBpre:
4818   case AArch64::STRHHpre:
4819   case AArch64::STRHpre:
4820   case AArch64::STRWpre:
4821   case AArch64::STRXpre: {
4822     unsigned Rt = Inst.getOperand(1).getReg();
4823     unsigned Rn = Inst.getOperand(2).getReg();
4824     if (RI->isSubRegisterEq(Rn, Rt))
4825       return Error(Loc[0], "unpredictable STR instruction, writeback base "
4826                            "is also a source");
4827     break;
4828   }
4829   case AArch64::STXRB:
4830   case AArch64::STXRH:
4831   case AArch64::STXRW:
4832   case AArch64::STXRX:
4833   case AArch64::STLXRB:
4834   case AArch64::STLXRH:
4835   case AArch64::STLXRW:
4836   case AArch64::STLXRX: {
4837     unsigned Rs = Inst.getOperand(0).getReg();
4838     unsigned Rt = Inst.getOperand(1).getReg();
4839     unsigned Rn = Inst.getOperand(2).getReg();
4840     if (RI->isSubRegisterEq(Rt, Rs) ||
4841         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4842       return Error(Loc[0],
4843                    "unpredictable STXR instruction, status is also a source");
4844     break;
4845   }
4846   case AArch64::STXPW:
4847   case AArch64::STXPX:
4848   case AArch64::STLXPW:
4849   case AArch64::STLXPX: {
4850     unsigned Rs = Inst.getOperand(0).getReg();
4851     unsigned Rt1 = Inst.getOperand(1).getReg();
4852     unsigned Rt2 = Inst.getOperand(2).getReg();
4853     unsigned Rn = Inst.getOperand(3).getReg();
4854     if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4855         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4856       return Error(Loc[0],
4857                    "unpredictable STXP instruction, status is also a source");
4858     break;
4859   }
4860   case AArch64::LDRABwriteback:
4861   case AArch64::LDRAAwriteback: {
4862     unsigned Xt = Inst.getOperand(0).getReg();
4863     unsigned Xn = Inst.getOperand(1).getReg();
4864     if (Xt == Xn)
4865       return Error(Loc[0],
4866           "unpredictable LDRA instruction, writeback base"
4867           " is also a destination");
4868     break;
4869   }
4870   }
4871 
4872   // Check v8.8-A memops instructions.
4873   switch (Inst.getOpcode()) {
4874   case AArch64::CPYFP:
4875   case AArch64::CPYFPWN:
4876   case AArch64::CPYFPRN:
4877   case AArch64::CPYFPN:
4878   case AArch64::CPYFPWT:
4879   case AArch64::CPYFPWTWN:
4880   case AArch64::CPYFPWTRN:
4881   case AArch64::CPYFPWTN:
4882   case AArch64::CPYFPRT:
4883   case AArch64::CPYFPRTWN:
4884   case AArch64::CPYFPRTRN:
4885   case AArch64::CPYFPRTN:
4886   case AArch64::CPYFPT:
4887   case AArch64::CPYFPTWN:
4888   case AArch64::CPYFPTRN:
4889   case AArch64::CPYFPTN:
4890   case AArch64::CPYFM:
4891   case AArch64::CPYFMWN:
4892   case AArch64::CPYFMRN:
4893   case AArch64::CPYFMN:
4894   case AArch64::CPYFMWT:
4895   case AArch64::CPYFMWTWN:
4896   case AArch64::CPYFMWTRN:
4897   case AArch64::CPYFMWTN:
4898   case AArch64::CPYFMRT:
4899   case AArch64::CPYFMRTWN:
4900   case AArch64::CPYFMRTRN:
4901   case AArch64::CPYFMRTN:
4902   case AArch64::CPYFMT:
4903   case AArch64::CPYFMTWN:
4904   case AArch64::CPYFMTRN:
4905   case AArch64::CPYFMTN:
4906   case AArch64::CPYFE:
4907   case AArch64::CPYFEWN:
4908   case AArch64::CPYFERN:
4909   case AArch64::CPYFEN:
4910   case AArch64::CPYFEWT:
4911   case AArch64::CPYFEWTWN:
4912   case AArch64::CPYFEWTRN:
4913   case AArch64::CPYFEWTN:
4914   case AArch64::CPYFERT:
4915   case AArch64::CPYFERTWN:
4916   case AArch64::CPYFERTRN:
4917   case AArch64::CPYFERTN:
4918   case AArch64::CPYFET:
4919   case AArch64::CPYFETWN:
4920   case AArch64::CPYFETRN:
4921   case AArch64::CPYFETN:
4922   case AArch64::CPYP:
4923   case AArch64::CPYPWN:
4924   case AArch64::CPYPRN:
4925   case AArch64::CPYPN:
4926   case AArch64::CPYPWT:
4927   case AArch64::CPYPWTWN:
4928   case AArch64::CPYPWTRN:
4929   case AArch64::CPYPWTN:
4930   case AArch64::CPYPRT:
4931   case AArch64::CPYPRTWN:
4932   case AArch64::CPYPRTRN:
4933   case AArch64::CPYPRTN:
4934   case AArch64::CPYPT:
4935   case AArch64::CPYPTWN:
4936   case AArch64::CPYPTRN:
4937   case AArch64::CPYPTN:
4938   case AArch64::CPYM:
4939   case AArch64::CPYMWN:
4940   case AArch64::CPYMRN:
4941   case AArch64::CPYMN:
4942   case AArch64::CPYMWT:
4943   case AArch64::CPYMWTWN:
4944   case AArch64::CPYMWTRN:
4945   case AArch64::CPYMWTN:
4946   case AArch64::CPYMRT:
4947   case AArch64::CPYMRTWN:
4948   case AArch64::CPYMRTRN:
4949   case AArch64::CPYMRTN:
4950   case AArch64::CPYMT:
4951   case AArch64::CPYMTWN:
4952   case AArch64::CPYMTRN:
4953   case AArch64::CPYMTN:
4954   case AArch64::CPYE:
4955   case AArch64::CPYEWN:
4956   case AArch64::CPYERN:
4957   case AArch64::CPYEN:
4958   case AArch64::CPYEWT:
4959   case AArch64::CPYEWTWN:
4960   case AArch64::CPYEWTRN:
4961   case AArch64::CPYEWTN:
4962   case AArch64::CPYERT:
4963   case AArch64::CPYERTWN:
4964   case AArch64::CPYERTRN:
4965   case AArch64::CPYERTN:
4966   case AArch64::CPYET:
4967   case AArch64::CPYETWN:
4968   case AArch64::CPYETRN:
4969   case AArch64::CPYETN: {
4970     unsigned Xd_wb = Inst.getOperand(0).getReg();
4971     unsigned Xs_wb = Inst.getOperand(1).getReg();
4972     unsigned Xn_wb = Inst.getOperand(2).getReg();
4973     unsigned Xd = Inst.getOperand(3).getReg();
4974     unsigned Xs = Inst.getOperand(4).getReg();
4975     unsigned Xn = Inst.getOperand(5).getReg();
4976     if (Xd_wb != Xd)
4977       return Error(Loc[0],
4978                    "invalid CPY instruction, Xd_wb and Xd do not match");
4979     if (Xs_wb != Xs)
4980       return Error(Loc[0],
4981                    "invalid CPY instruction, Xs_wb and Xs do not match");
4982     if (Xn_wb != Xn)
4983       return Error(Loc[0],
4984                    "invalid CPY instruction, Xn_wb and Xn do not match");
4985     if (Xd == Xs)
4986       return Error(Loc[0], "invalid CPY instruction, destination and source"
4987                            " registers are the same");
4988     if (Xd == Xn)
4989       return Error(Loc[0], "invalid CPY instruction, destination and size"
4990                            " registers are the same");
4991     if (Xs == Xn)
4992       return Error(Loc[0], "invalid CPY instruction, source and size"
4993                            " registers are the same");
4994     break;
4995   }
4996   case AArch64::SETP:
4997   case AArch64::SETPT:
4998   case AArch64::SETPN:
4999   case AArch64::SETPTN:
5000   case AArch64::SETM:
5001   case AArch64::SETMT:
5002   case AArch64::SETMN:
5003   case AArch64::SETMTN:
5004   case AArch64::SETE:
5005   case AArch64::SETET:
5006   case AArch64::SETEN:
5007   case AArch64::SETETN:
5008   case AArch64::SETGP:
5009   case AArch64::SETGPT:
5010   case AArch64::SETGPN:
5011   case AArch64::SETGPTN:
5012   case AArch64::SETGM:
5013   case AArch64::SETGMT:
5014   case AArch64::SETGMN:
5015   case AArch64::SETGMTN:
5016   case AArch64::MOPSSETGE:
5017   case AArch64::MOPSSETGET:
5018   case AArch64::MOPSSETGEN:
5019   case AArch64::MOPSSETGETN: {
5020     unsigned Xd_wb = Inst.getOperand(0).getReg();
5021     unsigned Xn_wb = Inst.getOperand(1).getReg();
5022     unsigned Xd = Inst.getOperand(2).getReg();
5023     unsigned Xn = Inst.getOperand(3).getReg();
5024     unsigned Xm = Inst.getOperand(4).getReg();
5025     if (Xd_wb != Xd)
5026       return Error(Loc[0],
5027                    "invalid SET instruction, Xd_wb and Xd do not match");
5028     if (Xn_wb != Xn)
5029       return Error(Loc[0],
5030                    "invalid SET instruction, Xn_wb and Xn do not match");
5031     if (Xd == Xn)
5032       return Error(Loc[0], "invalid SET instruction, destination and size"
5033                            " registers are the same");
5034     if (Xd == Xm)
5035       return Error(Loc[0], "invalid SET instruction, destination and source"
5036                            " registers are the same");
5037     if (Xn == Xm)
5038       return Error(Loc[0], "invalid SET instruction, source and size"
5039                            " registers are the same");
5040     break;
5041   }
5042   }
5043 
5044   // Now check immediate ranges. Separate from the above as there is overlap
5045   // in the instructions being checked and this keeps the nested conditionals
5046   // to a minimum.
5047   switch (Inst.getOpcode()) {
5048   case AArch64::ADDSWri:
5049   case AArch64::ADDSXri:
5050   case AArch64::ADDWri:
5051   case AArch64::ADDXri:
5052   case AArch64::SUBSWri:
5053   case AArch64::SUBSXri:
5054   case AArch64::SUBWri:
5055   case AArch64::SUBXri: {
5056     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5057     // some slight duplication here.
5058     if (Inst.getOperand(2).isExpr()) {
5059       const MCExpr *Expr = Inst.getOperand(2).getExpr();
5060       AArch64MCExpr::VariantKind ELFRefKind;
5061       MCSymbolRefExpr::VariantKind DarwinRefKind;
5062       int64_t Addend;
5063       if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5064 
5065         // Only allow these with ADDXri.
5066         if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5067              DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5068             Inst.getOpcode() == AArch64::ADDXri)
5069           return false;
5070 
5071         // Only allow these with ADDXri/ADDWri
5072         if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5073              ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5074              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5075              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5076              ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5077              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5078              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5079              ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5080              ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5081              ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5082             (Inst.getOpcode() == AArch64::ADDXri ||
5083              Inst.getOpcode() == AArch64::ADDWri))
5084           return false;
5085 
5086         // Don't allow symbol refs in the immediate field otherwise
5087         // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5088         // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5089         // 'cmp w0, 'borked')
5090         return Error(Loc.back(), "invalid immediate expression");
5091       }
5092       // We don't validate more complex expressions here
5093     }
5094     return false;
5095   }
5096   default:
5097     return false;
5098   }
5099 }
5100 
5101 static std::string AArch64MnemonicSpellCheck(StringRef S,
5102                                              const FeatureBitset &FBS,
5103                                              unsigned VariantID = 0);
5104 
/// Convert a failed-match code from the tablegen'd matcher into a
/// human-readable diagnostic anchored at \p Loc.
///
/// \param Loc       Location the diagnostic should point at (typically the
///                  offending operand).
/// \param ErrCode   One of the Match_* codes produced by the generated
///                  matcher.
/// \param ErrorInfo For Match_InvalidTiedOperand, the index into \p Operands
///                  of the operand that violated the tied-register
///                  constraint; unused for the other codes handled here.
/// \param Operands  The parsed operand list; used to recover per-operand
///                  detail (the tied-register constraint kind, and the
///                  mnemonic token for spell-check suggestions).
/// \return Always true, i.e. the result of Error(), so callers can
///         `return showMatchError(...)` directly.
///
/// NOTE: the exact wording of these messages is relied upon by assembler
/// regression tests; do not reword without updating the tests.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // Tied-operand failures carry the offending operand's index in
    // ErrorInfo; the required relationship (exact register vs. the 32/64-bit
    // form of it) is recorded on the operand itself.
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-offset diagnostics: each message spells out the scale and the
  // signed/unsigned byte range implied by the failed addressing-mode
  // predicate.
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_0:
    return Error(Loc, "immediate must be 0.");
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_3:
    return Error(Loc, "immediate must be an integer in range [0, 3].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // SVE add/sub/cpy immediates: an 8-bit value optionally shifted left by 8,
  // hence the "or a multiple of 256" wording for element widths > 8 bits.
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  case Match_InvalidIndexRange0_0:
    return Error(Loc, "expected lane specifier '[0]'");
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
  case Match_InvalidSVCR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Unknown mnemonic: append a "did you mean" suggestion derived from the
    // mnemonic table, restricted to the currently available CPU features.
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64shifted128:
    return Error(
        Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted128:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  // SME matrix (ZA) operands: tile-vector counts scale with element width,
  // hence the differing za[0-N] ranges per suffix.
  case Match_InvalidMatrixTileVectorH8:
  case Match_InvalidMatrixTileVectorV8:
    return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
  case Match_InvalidMatrixTileVectorH16:
  case Match_InvalidMatrixTileVectorV16:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
  case Match_InvalidMatrixTileVectorH32:
  case Match_InvalidMatrixTileVectorV32:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
  case Match_InvalidMatrixTileVectorH64:
  case Match_InvalidMatrixTileVectorV64:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
  case Match_InvalidMatrixTileVectorH128:
  case Match_InvalidMatrixTileVectorV128:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
  case Match_InvalidMatrixTile32:
    return Error(Loc, "invalid matrix operand, expected za[0-3].s");
  case Match_InvalidMatrixTile64:
    return Error(Loc, "invalid matrix operand, expected za[0-7].d");
  case Match_InvalidMatrix:
    return Error(Loc, "invalid matrix operand, expected za");
  case Match_InvalidMatrixIndexGPR32_12_15:
    return Error(Loc, "operand must be a register in range [w12, w15]");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
5457 
5458 static const char *getSubtargetFeatureName(uint64_t Val);
5459 
5460 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
5461                                                OperandVector &Operands,
5462                                                MCStreamer &Out,
5463                                                uint64_t &ErrorInfo,
5464                                                bool MatchingInlineAsm) {
5465   assert(!Operands.empty() && "Unexpect empty operand list!");
5466   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
5467   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
5468 
5469   StringRef Tok = Op.getToken();
5470   unsigned NumOperands = Operands.size();
5471 
5472   if (NumOperands == 4 && Tok == "lsl") {
5473     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5474     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5475     if (Op2.isScalarReg() && Op3.isImm()) {
5476       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5477       if (Op3CE) {
5478         uint64_t Op3Val = Op3CE->getValue();
5479         uint64_t NewOp3Val = 0;
5480         uint64_t NewOp4Val = 0;
5481         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
5482                 Op2.getReg())) {
5483           NewOp3Val = (32 - Op3Val) & 0x1f;
5484           NewOp4Val = 31 - Op3Val;
5485         } else {
5486           NewOp3Val = (64 - Op3Val) & 0x3f;
5487           NewOp4Val = 63 - Op3Val;
5488         }
5489 
5490         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
5491         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
5492 
5493         Operands[0] =
5494             AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
5495         Operands.push_back(AArch64Operand::CreateImm(
5496             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
5497         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
5498                                                 Op3.getEndLoc(), getContext());
5499       }
5500     }
5501   } else if (NumOperands == 4 && Tok == "bfc") {
5502     // FIXME: Horrible hack to handle BFC->BFM alias.
5503     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5504     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
5505     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
5506 
5507     if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
5508       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
5509       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
5510 
5511       if (LSBCE && WidthCE) {
5512         uint64_t LSB = LSBCE->getValue();
5513         uint64_t Width = WidthCE->getValue();
5514 
5515         uint64_t RegWidth = 0;
5516         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5517                 Op1.getReg()))
5518           RegWidth = 64;
5519         else
5520           RegWidth = 32;
5521 
5522         if (LSB >= RegWidth)
5523           return Error(LSBOp.getStartLoc(),
5524                        "expected integer in range [0, 31]");
5525         if (Width < 1 || Width > RegWidth)
5526           return Error(WidthOp.getStartLoc(),
5527                        "expected integer in range [1, 32]");
5528 
5529         uint64_t ImmR = 0;
5530         if (RegWidth == 32)
5531           ImmR = (32 - LSB) & 0x1f;
5532         else
5533           ImmR = (64 - LSB) & 0x3f;
5534 
5535         uint64_t ImmS = Width - 1;
5536 
5537         if (ImmR != 0 && ImmS >= ImmR)
5538           return Error(WidthOp.getStartLoc(),
5539                        "requested insert overflows register");
5540 
5541         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
5542         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
5543         Operands[0] =
5544             AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
5545         Operands[2] = AArch64Operand::CreateReg(
5546             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
5547             SMLoc(), SMLoc(), getContext());
5548         Operands[3] = AArch64Operand::CreateImm(
5549             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
5550         Operands.emplace_back(
5551             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
5552                                       WidthOp.getEndLoc(), getContext()));
5553       }
5554     }
5555   } else if (NumOperands == 5) {
5556     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
5557     // UBFIZ -> UBFM aliases.
5558     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
5559       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5560       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5561       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5562 
5563       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5564         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5565         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5566 
5567         if (Op3CE && Op4CE) {
5568           uint64_t Op3Val = Op3CE->getValue();
5569           uint64_t Op4Val = Op4CE->getValue();
5570 
5571           uint64_t RegWidth = 0;
5572           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5573                   Op1.getReg()))
5574             RegWidth = 64;
5575           else
5576             RegWidth = 32;
5577 
5578           if (Op3Val >= RegWidth)
5579             return Error(Op3.getStartLoc(),
5580                          "expected integer in range [0, 31]");
5581           if (Op4Val < 1 || Op4Val > RegWidth)
5582             return Error(Op4.getStartLoc(),
5583                          "expected integer in range [1, 32]");
5584 
5585           uint64_t NewOp3Val = 0;
5586           if (RegWidth == 32)
5587             NewOp3Val = (32 - Op3Val) & 0x1f;
5588           else
5589             NewOp3Val = (64 - Op3Val) & 0x3f;
5590 
5591           uint64_t NewOp4Val = Op4Val - 1;
5592 
5593           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
5594             return Error(Op4.getStartLoc(),
5595                          "requested insert overflows register");
5596 
5597           const MCExpr *NewOp3 =
5598               MCConstantExpr::create(NewOp3Val, getContext());
5599           const MCExpr *NewOp4 =
5600               MCConstantExpr::create(NewOp4Val, getContext());
5601           Operands[3] = AArch64Operand::CreateImm(
5602               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
5603           Operands[4] = AArch64Operand::CreateImm(
5604               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5605           if (Tok == "bfi")
5606             Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5607                                                       getContext());
5608           else if (Tok == "sbfiz")
5609             Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5610                                                       getContext());
5611           else if (Tok == "ubfiz")
5612             Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5613                                                       getContext());
5614           else
5615             llvm_unreachable("No valid mnemonic for alias?");
5616         }
5617       }
5618 
5619       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
5620       // UBFX -> UBFM aliases.
5621     } else if (NumOperands == 5 &&
5622                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
5623       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5624       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5625       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5626 
5627       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5628         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5629         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5630 
5631         if (Op3CE && Op4CE) {
5632           uint64_t Op3Val = Op3CE->getValue();
5633           uint64_t Op4Val = Op4CE->getValue();
5634 
5635           uint64_t RegWidth = 0;
5636           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5637                   Op1.getReg()))
5638             RegWidth = 64;
5639           else
5640             RegWidth = 32;
5641 
5642           if (Op3Val >= RegWidth)
5643             return Error(Op3.getStartLoc(),
5644                          "expected integer in range [0, 31]");
5645           if (Op4Val < 1 || Op4Val > RegWidth)
5646             return Error(Op4.getStartLoc(),
5647                          "expected integer in range [1, 32]");
5648 
5649           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
5650 
5651           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
5652             return Error(Op4.getStartLoc(),
5653                          "requested extract overflows register");
5654 
5655           const MCExpr *NewOp4 =
5656               MCConstantExpr::create(NewOp4Val, getContext());
5657           Operands[4] = AArch64Operand::CreateImm(
5658               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5659           if (Tok == "bfxil")
5660             Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5661                                                       getContext());
5662           else if (Tok == "sbfx")
5663             Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5664                                                       getContext());
5665           else if (Tok == "ubfx")
5666             Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5667                                                       getContext());
5668           else
5669             llvm_unreachable("No valid mnemonic for alias?");
5670         }
5671       }
5672     }
5673   }
5674 
5675   // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
5676   // instruction for FP registers correctly in some rare circumstances. Convert
5677   // it to a safe instruction and warn (because silently changing someone's
5678   // assembly is rude).
5679   if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
5680       NumOperands == 4 && Tok == "movi") {
5681     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5682     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5683     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5684     if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
5685         (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
5686       StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
5687       if (Suffix.lower() == ".2d" &&
5688           cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
5689         Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
5690                 " correctly on this CPU, converting to equivalent movi.16b");
5691         // Switch the suffix to .16b.
5692         unsigned Idx = Op1.isToken() ? 1 : 2;
5693         Operands[Idx] =
5694             AArch64Operand::CreateToken(".16b", IDLoc, getContext());
5695       }
5696     }
5697   }
5698 
5699   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
5700   //        InstAlias can't quite handle this since the reg classes aren't
5701   //        subclasses.
5702   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
5703     // The source register can be Wn here, but the matcher expects a
5704     // GPR64. Twiddle it here if necessary.
5705     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5706     if (Op.isScalarReg()) {
5707       unsigned Reg = getXRegFromWReg(Op.getReg());
5708       Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5709                                               Op.getStartLoc(), Op.getEndLoc(),
5710                                               getContext());
5711     }
5712   }
5713   // FIXME: Likewise for sxt[bh] with a Xd dst operand
5714   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5715     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5716     if (Op.isScalarReg() &&
5717         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5718             Op.getReg())) {
5719       // The source register can be Wn here, but the matcher expects a
5720       // GPR64. Twiddle it here if necessary.
5721       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5722       if (Op.isScalarReg()) {
5723         unsigned Reg = getXRegFromWReg(Op.getReg());
5724         Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5725                                                 Op.getStartLoc(),
5726                                                 Op.getEndLoc(), getContext());
5727       }
5728     }
5729   }
5730   // FIXME: Likewise for uxt[bh] with a Xd dst operand
5731   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5732     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5733     if (Op.isScalarReg() &&
5734         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5735             Op.getReg())) {
5736       // The source register can be Wn here, but the matcher expects a
5737       // GPR32. Twiddle it here if necessary.
5738       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5739       if (Op.isScalarReg()) {
5740         unsigned Reg = getWRegFromXReg(Op.getReg());
5741         Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5742                                                 Op.getStartLoc(),
5743                                                 Op.getEndLoc(), getContext());
5744       }
5745     }
5746   }
5747 
5748   MCInst Inst;
5749   FeatureBitset MissingFeatures;
5750   // First try to match against the secondary set of tables containing the
5751   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5752   unsigned MatchResult =
5753       MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5754                            MatchingInlineAsm, 1);
5755 
5756   // If that fails, try against the alternate table containing long-form NEON:
5757   // "fadd v0.2s, v1.2s, v2.2s"
5758   if (MatchResult != Match_Success) {
5759     // But first, save the short-form match result: we can use it in case the
5760     // long-form match also fails.
5761     auto ShortFormNEONErrorInfo = ErrorInfo;
5762     auto ShortFormNEONMatchResult = MatchResult;
5763     auto ShortFormNEONMissingFeatures = MissingFeatures;
5764 
5765     MatchResult =
5766         MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5767                              MatchingInlineAsm, 0);
5768 
5769     // Now, both matches failed, and the long-form match failed on the mnemonic
5770     // suffix token operand.  The short-form match failure is probably more
5771     // relevant: use it instead.
5772     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5773         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5774         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5775       MatchResult = ShortFormNEONMatchResult;
5776       ErrorInfo = ShortFormNEONErrorInfo;
5777       MissingFeatures = ShortFormNEONMissingFeatures;
5778     }
5779   }
5780 
5781   switch (MatchResult) {
5782   case Match_Success: {
5783     // Perform range checking and other semantic validations
5784     SmallVector<SMLoc, 8> OperandLocs;
5785     NumOperands = Operands.size();
5786     for (unsigned i = 1; i < NumOperands; ++i)
5787       OperandLocs.push_back(Operands[i]->getStartLoc());
5788     if (validateInstruction(Inst, IDLoc, OperandLocs))
5789       return true;
5790 
5791     Inst.setLoc(IDLoc);
5792     Out.emitInstruction(Inst, getSTI());
5793     return false;
5794   }
5795   case Match_MissingFeature: {
5796     assert(MissingFeatures.any() && "Unknown missing feature!");
5797     // Special case the error message for the very common case where only
5798     // a single subtarget feature is missing (neon, e.g.).
5799     std::string Msg = "instruction requires:";
5800     for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5801       if (MissingFeatures[i]) {
5802         Msg += " ";
5803         Msg += getSubtargetFeatureName(i);
5804       }
5805     }
5806     return Error(IDLoc, Msg);
5807   }
5808   case Match_MnemonicFail:
5809     return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5810   case Match_InvalidOperand: {
5811     SMLoc ErrorLoc = IDLoc;
5812 
5813     if (ErrorInfo != ~0ULL) {
5814       if (ErrorInfo >= Operands.size())
5815         return Error(IDLoc, "too few operands for instruction",
5816                      SMRange(IDLoc, getTok().getLoc()));
5817 
5818       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5819       if (ErrorLoc == SMLoc())
5820         ErrorLoc = IDLoc;
5821     }
5822     // If the match failed on a suffix token operand, tweak the diagnostic
5823     // accordingly.
5824     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5825         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5826       MatchResult = Match_InvalidSuffix;
5827 
5828     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5829   }
5830   case Match_InvalidTiedOperand:
5831   case Match_InvalidMemoryIndexed1:
5832   case Match_InvalidMemoryIndexed2:
5833   case Match_InvalidMemoryIndexed4:
5834   case Match_InvalidMemoryIndexed8:
5835   case Match_InvalidMemoryIndexed16:
5836   case Match_InvalidCondCode:
5837   case Match_AddSubRegExtendSmall:
5838   case Match_AddSubRegExtendLarge:
5839   case Match_AddSubSecondSource:
5840   case Match_LogicalSecondSource:
5841   case Match_AddSubRegShift32:
5842   case Match_AddSubRegShift64:
5843   case Match_InvalidMovImm32Shift:
5844   case Match_InvalidMovImm64Shift:
5845   case Match_InvalidFPImm:
5846   case Match_InvalidMemoryWExtend8:
5847   case Match_InvalidMemoryWExtend16:
5848   case Match_InvalidMemoryWExtend32:
5849   case Match_InvalidMemoryWExtend64:
5850   case Match_InvalidMemoryWExtend128:
5851   case Match_InvalidMemoryXExtend8:
5852   case Match_InvalidMemoryXExtend16:
5853   case Match_InvalidMemoryXExtend32:
5854   case Match_InvalidMemoryXExtend64:
5855   case Match_InvalidMemoryXExtend128:
5856   case Match_InvalidMemoryIndexed1SImm4:
5857   case Match_InvalidMemoryIndexed2SImm4:
5858   case Match_InvalidMemoryIndexed3SImm4:
5859   case Match_InvalidMemoryIndexed4SImm4:
5860   case Match_InvalidMemoryIndexed1SImm6:
5861   case Match_InvalidMemoryIndexed16SImm4:
5862   case Match_InvalidMemoryIndexed32SImm4:
5863   case Match_InvalidMemoryIndexed4SImm7:
5864   case Match_InvalidMemoryIndexed8SImm7:
5865   case Match_InvalidMemoryIndexed16SImm7:
5866   case Match_InvalidMemoryIndexed8UImm5:
5867   case Match_InvalidMemoryIndexed4UImm5:
5868   case Match_InvalidMemoryIndexed2UImm5:
5869   case Match_InvalidMemoryIndexed1UImm6:
5870   case Match_InvalidMemoryIndexed2UImm6:
5871   case Match_InvalidMemoryIndexed4UImm6:
5872   case Match_InvalidMemoryIndexed8UImm6:
5873   case Match_InvalidMemoryIndexed16UImm6:
5874   case Match_InvalidMemoryIndexedSImm6:
5875   case Match_InvalidMemoryIndexedSImm5:
5876   case Match_InvalidMemoryIndexedSImm8:
5877   case Match_InvalidMemoryIndexedSImm9:
5878   case Match_InvalidMemoryIndexed16SImm9:
5879   case Match_InvalidMemoryIndexed8SImm10:
5880   case Match_InvalidImm0_0:
5881   case Match_InvalidImm0_1:
5882   case Match_InvalidImm0_3:
5883   case Match_InvalidImm0_7:
5884   case Match_InvalidImm0_15:
5885   case Match_InvalidImm0_31:
5886   case Match_InvalidImm0_63:
5887   case Match_InvalidImm0_127:
5888   case Match_InvalidImm0_255:
5889   case Match_InvalidImm0_65535:
5890   case Match_InvalidImm1_8:
5891   case Match_InvalidImm1_16:
5892   case Match_InvalidImm1_32:
5893   case Match_InvalidImm1_64:
5894   case Match_InvalidSVEAddSubImm8:
5895   case Match_InvalidSVEAddSubImm16:
5896   case Match_InvalidSVEAddSubImm32:
5897   case Match_InvalidSVEAddSubImm64:
5898   case Match_InvalidSVECpyImm8:
5899   case Match_InvalidSVECpyImm16:
5900   case Match_InvalidSVECpyImm32:
5901   case Match_InvalidSVECpyImm64:
5902   case Match_InvalidIndexRange0_0:
5903   case Match_InvalidIndexRange1_1:
5904   case Match_InvalidIndexRange0_15:
5905   case Match_InvalidIndexRange0_7:
5906   case Match_InvalidIndexRange0_3:
5907   case Match_InvalidIndexRange0_1:
5908   case Match_InvalidSVEIndexRange0_63:
5909   case Match_InvalidSVEIndexRange0_31:
5910   case Match_InvalidSVEIndexRange0_15:
5911   case Match_InvalidSVEIndexRange0_7:
5912   case Match_InvalidSVEIndexRange0_3:
5913   case Match_InvalidLabel:
5914   case Match_InvalidComplexRotationEven:
5915   case Match_InvalidComplexRotationOdd:
5916   case Match_InvalidGPR64shifted8:
5917   case Match_InvalidGPR64shifted16:
5918   case Match_InvalidGPR64shifted32:
5919   case Match_InvalidGPR64shifted64:
5920   case Match_InvalidGPR64shifted128:
5921   case Match_InvalidGPR64NoXZRshifted8:
5922   case Match_InvalidGPR64NoXZRshifted16:
5923   case Match_InvalidGPR64NoXZRshifted32:
5924   case Match_InvalidGPR64NoXZRshifted64:
5925   case Match_InvalidGPR64NoXZRshifted128:
5926   case Match_InvalidZPR32UXTW8:
5927   case Match_InvalidZPR32UXTW16:
5928   case Match_InvalidZPR32UXTW32:
5929   case Match_InvalidZPR32UXTW64:
5930   case Match_InvalidZPR32SXTW8:
5931   case Match_InvalidZPR32SXTW16:
5932   case Match_InvalidZPR32SXTW32:
5933   case Match_InvalidZPR32SXTW64:
5934   case Match_InvalidZPR64UXTW8:
5935   case Match_InvalidZPR64SXTW8:
5936   case Match_InvalidZPR64UXTW16:
5937   case Match_InvalidZPR64SXTW16:
5938   case Match_InvalidZPR64UXTW32:
5939   case Match_InvalidZPR64SXTW32:
5940   case Match_InvalidZPR64UXTW64:
5941   case Match_InvalidZPR64SXTW64:
5942   case Match_InvalidZPR32LSL8:
5943   case Match_InvalidZPR32LSL16:
5944   case Match_InvalidZPR32LSL32:
5945   case Match_InvalidZPR32LSL64:
5946   case Match_InvalidZPR64LSL8:
5947   case Match_InvalidZPR64LSL16:
5948   case Match_InvalidZPR64LSL32:
5949   case Match_InvalidZPR64LSL64:
5950   case Match_InvalidZPR0:
5951   case Match_InvalidZPR8:
5952   case Match_InvalidZPR16:
5953   case Match_InvalidZPR32:
5954   case Match_InvalidZPR64:
5955   case Match_InvalidZPR128:
5956   case Match_InvalidZPR_3b8:
5957   case Match_InvalidZPR_3b16:
5958   case Match_InvalidZPR_3b32:
5959   case Match_InvalidZPR_4b16:
5960   case Match_InvalidZPR_4b32:
5961   case Match_InvalidZPR_4b64:
5962   case Match_InvalidSVEPredicateAnyReg:
5963   case Match_InvalidSVEPattern:
5964   case Match_InvalidSVEPredicateBReg:
5965   case Match_InvalidSVEPredicateHReg:
5966   case Match_InvalidSVEPredicateSReg:
5967   case Match_InvalidSVEPredicateDReg:
5968   case Match_InvalidSVEPredicate3bAnyReg:
5969   case Match_InvalidSVEExactFPImmOperandHalfOne:
5970   case Match_InvalidSVEExactFPImmOperandHalfTwo:
5971   case Match_InvalidSVEExactFPImmOperandZeroOne:
5972   case Match_InvalidMatrixTile32:
5973   case Match_InvalidMatrixTile64:
5974   case Match_InvalidMatrix:
5975   case Match_InvalidMatrixTileVectorH8:
5976   case Match_InvalidMatrixTileVectorH16:
5977   case Match_InvalidMatrixTileVectorH32:
5978   case Match_InvalidMatrixTileVectorH64:
5979   case Match_InvalidMatrixTileVectorH128:
5980   case Match_InvalidMatrixTileVectorV8:
5981   case Match_InvalidMatrixTileVectorV16:
5982   case Match_InvalidMatrixTileVectorV32:
5983   case Match_InvalidMatrixTileVectorV64:
5984   case Match_InvalidMatrixTileVectorV128:
5985   case Match_InvalidSVCR:
5986   case Match_InvalidMatrixIndexGPR32_12_15:
5987   case Match_MSR:
5988   case Match_MRS: {
5989     if (ErrorInfo >= Operands.size())
5990       return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5991     // Any time we get here, there's nothing fancy to do. Just get the
5992     // operand SMLoc and display the diagnostic.
5993     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5994     if (ErrorLoc == SMLoc())
5995       ErrorLoc = IDLoc;
5996     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5997   }
5998   }
5999 
6000   llvm_unreachable("Implement any new match types added!");
6001 }
6002 
6003 /// ParseDirective parses the arm specific directives
6004 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
6005   const MCContext::Environment Format = getContext().getObjectFileType();
6006   bool IsMachO = Format == MCContext::IsMachO;
6007   bool IsCOFF = Format == MCContext::IsCOFF;
6008 
6009   auto IDVal = DirectiveID.getIdentifier().lower();
6010   SMLoc Loc = DirectiveID.getLoc();
6011   if (IDVal == ".arch")
6012     parseDirectiveArch(Loc);
6013   else if (IDVal == ".cpu")
6014     parseDirectiveCPU(Loc);
6015   else if (IDVal == ".tlsdesccall")
6016     parseDirectiveTLSDescCall(Loc);
6017   else if (IDVal == ".ltorg" || IDVal == ".pool")
6018     parseDirectiveLtorg(Loc);
6019   else if (IDVal == ".unreq")
6020     parseDirectiveUnreq(Loc);
6021   else if (IDVal == ".inst")
6022     parseDirectiveInst(Loc);
6023   else if (IDVal == ".cfi_negate_ra_state")
6024     parseDirectiveCFINegateRAState();
6025   else if (IDVal == ".cfi_b_key_frame")
6026     parseDirectiveCFIBKeyFrame();
6027   else if (IDVal == ".arch_extension")
6028     parseDirectiveArchExtension(Loc);
6029   else if (IDVal == ".variant_pcs")
6030     parseDirectiveVariantPCS(Loc);
6031   else if (IsMachO) {
6032     if (IDVal == MCLOHDirectiveName())
6033       parseDirectiveLOH(IDVal, Loc);
6034     else
6035       return true;
6036   } else if (IsCOFF) {
6037     if (IDVal == ".seh_stackalloc")
6038       parseDirectiveSEHAllocStack(Loc);
6039     else if (IDVal == ".seh_endprologue")
6040       parseDirectiveSEHPrologEnd(Loc);
6041     else if (IDVal == ".seh_save_r19r20_x")
6042       parseDirectiveSEHSaveR19R20X(Loc);
6043     else if (IDVal == ".seh_save_fplr")
6044       parseDirectiveSEHSaveFPLR(Loc);
6045     else if (IDVal == ".seh_save_fplr_x")
6046       parseDirectiveSEHSaveFPLRX(Loc);
6047     else if (IDVal == ".seh_save_reg")
6048       parseDirectiveSEHSaveReg(Loc);
6049     else if (IDVal == ".seh_save_reg_x")
6050       parseDirectiveSEHSaveRegX(Loc);
6051     else if (IDVal == ".seh_save_regp")
6052       parseDirectiveSEHSaveRegP(Loc);
6053     else if (IDVal == ".seh_save_regp_x")
6054       parseDirectiveSEHSaveRegPX(Loc);
6055     else if (IDVal == ".seh_save_lrpair")
6056       parseDirectiveSEHSaveLRPair(Loc);
6057     else if (IDVal == ".seh_save_freg")
6058       parseDirectiveSEHSaveFReg(Loc);
6059     else if (IDVal == ".seh_save_freg_x")
6060       parseDirectiveSEHSaveFRegX(Loc);
6061     else if (IDVal == ".seh_save_fregp")
6062       parseDirectiveSEHSaveFRegP(Loc);
6063     else if (IDVal == ".seh_save_fregp_x")
6064       parseDirectiveSEHSaveFRegPX(Loc);
6065     else if (IDVal == ".seh_set_fp")
6066       parseDirectiveSEHSetFP(Loc);
6067     else if (IDVal == ".seh_add_fp")
6068       parseDirectiveSEHAddFP(Loc);
6069     else if (IDVal == ".seh_nop")
6070       parseDirectiveSEHNop(Loc);
6071     else if (IDVal == ".seh_save_next")
6072       parseDirectiveSEHSaveNext(Loc);
6073     else if (IDVal == ".seh_startepilogue")
6074       parseDirectiveSEHEpilogStart(Loc);
6075     else if (IDVal == ".seh_endepilogue")
6076       parseDirectiveSEHEpilogEnd(Loc);
6077     else if (IDVal == ".seh_trap_frame")
6078       parseDirectiveSEHTrapFrame(Loc);
6079     else if (IDVal == ".seh_pushframe")
6080       parseDirectiveSEHMachineFrame(Loc);
6081     else if (IDVal == ".seh_context")
6082       parseDirectiveSEHContext(Loc);
6083     else if (IDVal == ".seh_clear_unwound_to_call")
6084       parseDirectiveSEHClearUnwoundToCall(Loc);
6085     else
6086       return true;
6087   } else
6088     return true;
6089   return false;
6090 }
6091 
6092 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
6093                             SmallVector<StringRef, 4> &RequestedExtensions) {
6094   const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
6095   const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
6096 
6097   if (!NoCrypto && Crypto) {
6098     switch (ArchKind) {
6099     default:
6100       // Map 'generic' (and others) to sha2 and aes, because
6101       // that was the traditional meaning of crypto.
6102     case AArch64::ArchKind::ARMV8_1A:
6103     case AArch64::ArchKind::ARMV8_2A:
6104     case AArch64::ArchKind::ARMV8_3A:
6105       RequestedExtensions.push_back("sha2");
6106       RequestedExtensions.push_back("aes");
6107       break;
6108     case AArch64::ArchKind::ARMV8_4A:
6109     case AArch64::ArchKind::ARMV8_5A:
6110     case AArch64::ArchKind::ARMV8_6A:
6111     case AArch64::ArchKind::ARMV8_7A:
6112     case AArch64::ArchKind::ARMV8_8A:
6113     case AArch64::ArchKind::ARMV9A:
6114     case AArch64::ArchKind::ARMV9_1A:
6115     case AArch64::ArchKind::ARMV9_2A:
6116     case AArch64::ArchKind::ARMV9_3A:
6117     case AArch64::ArchKind::ARMV8R:
6118       RequestedExtensions.push_back("sm4");
6119       RequestedExtensions.push_back("sha3");
6120       RequestedExtensions.push_back("sha2");
6121       RequestedExtensions.push_back("aes");
6122       break;
6123     }
6124   } else if (NoCrypto) {
6125     switch (ArchKind) {
6126     default:
6127       // Map 'generic' (and others) to sha2 and aes, because
6128       // that was the traditional meaning of crypto.
6129     case AArch64::ArchKind::ARMV8_1A:
6130     case AArch64::ArchKind::ARMV8_2A:
6131     case AArch64::ArchKind::ARMV8_3A:
6132       RequestedExtensions.push_back("nosha2");
6133       RequestedExtensions.push_back("noaes");
6134       break;
6135     case AArch64::ArchKind::ARMV8_4A:
6136     case AArch64::ArchKind::ARMV8_5A:
6137     case AArch64::ArchKind::ARMV8_6A:
6138     case AArch64::ArchKind::ARMV8_7A:
6139     case AArch64::ArchKind::ARMV8_8A:
6140     case AArch64::ArchKind::ARMV9A:
6141     case AArch64::ArchKind::ARMV9_1A:
6142     case AArch64::ArchKind::ARMV9_2A:
6143       RequestedExtensions.push_back("nosm4");
6144       RequestedExtensions.push_back("nosha3");
6145       RequestedExtensions.push_back("nosha2");
6146       RequestedExtensions.push_back("noaes");
6147       break;
6148     }
6149   }
6150 }
6151 
6152 /// parseDirectiveArch
6153 ///   ::= .arch token
6154 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
6155   SMLoc ArchLoc = getLoc();
6156 
6157   StringRef Arch, ExtensionString;
6158   std::tie(Arch, ExtensionString) =
6159       getParser().parseStringToEndOfStatement().trim().split('+');
6160 
6161   AArch64::ArchKind ID = AArch64::parseArch(Arch);
6162   if (ID == AArch64::ArchKind::INVALID)
6163     return Error(ArchLoc, "unknown arch name");
6164 
6165   if (parseToken(AsmToken::EndOfStatement))
6166     return true;
6167 
6168   // Get the architecture and extension features.
6169   std::vector<StringRef> AArch64Features;
6170   AArch64::getArchFeatures(ID, AArch64Features);
6171   AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
6172                                 AArch64Features);
6173 
6174   MCSubtargetInfo &STI = copySTI();
6175   std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
6176   STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
6177                          join(ArchFeatures.begin(), ArchFeatures.end(), ","));
6178 
6179   SmallVector<StringRef, 4> RequestedExtensions;
6180   if (!ExtensionString.empty())
6181     ExtensionString.split(RequestedExtensions, '+');
6182 
6183   ExpandCryptoAEK(ID, RequestedExtensions);
6184 
6185   FeatureBitset Features = STI.getFeatureBits();
6186   for (auto Name : RequestedExtensions) {
6187     bool EnableFeature = true;
6188 
6189     if (Name.startswith_insensitive("no")) {
6190       EnableFeature = false;
6191       Name = Name.substr(2);
6192     }
6193 
6194     for (const auto &Extension : ExtensionMap) {
6195       if (Extension.Name != Name)
6196         continue;
6197 
6198       if (Extension.Features.none())
6199         report_fatal_error("unsupported architectural extension: " + Name);
6200 
6201       FeatureBitset ToggleFeatures = EnableFeature
6202                                          ? (~Features & Extension.Features)
6203                                          : ( Features & Extension.Features);
6204       FeatureBitset Features =
6205           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6206       setAvailableFeatures(Features);
6207       break;
6208     }
6209   }
6210   return false;
6211 }
6212 
6213 /// parseDirectiveArchExtension
6214 ///   ::= .arch_extension [no]feature
6215 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
6216   SMLoc ExtLoc = getLoc();
6217 
6218   StringRef Name = getParser().parseStringToEndOfStatement().trim();
6219 
6220   if (parseToken(AsmToken::EndOfStatement,
6221                  "unexpected token in '.arch_extension' directive"))
6222     return true;
6223 
6224   bool EnableFeature = true;
6225   if (Name.startswith_insensitive("no")) {
6226     EnableFeature = false;
6227     Name = Name.substr(2);
6228   }
6229 
6230   MCSubtargetInfo &STI = copySTI();
6231   FeatureBitset Features = STI.getFeatureBits();
6232   for (const auto &Extension : ExtensionMap) {
6233     if (Extension.Name != Name)
6234       continue;
6235 
6236     if (Extension.Features.none())
6237       return Error(ExtLoc, "unsupported architectural extension: " + Name);
6238 
6239     FeatureBitset ToggleFeatures = EnableFeature
6240                                        ? (~Features & Extension.Features)
6241                                        : (Features & Extension.Features);
6242     FeatureBitset Features =
6243         ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6244     setAvailableFeatures(Features);
6245     return false;
6246   }
6247 
6248   return Error(ExtLoc, "unknown architectural extension: " + Name);
6249 }
6250 
6251 static SMLoc incrementLoc(SMLoc L, int Offset) {
6252   return SMLoc::getFromPointer(L.getPointer() + Offset);
6253 }
6254 
6255 /// parseDirectiveCPU
6256 ///   ::= .cpu id
6257 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
6258   SMLoc CurLoc = getLoc();
6259 
6260   StringRef CPU, ExtensionString;
6261   std::tie(CPU, ExtensionString) =
6262       getParser().parseStringToEndOfStatement().trim().split('+');
6263 
6264   if (parseToken(AsmToken::EndOfStatement))
6265     return true;
6266 
6267   SmallVector<StringRef, 4> RequestedExtensions;
6268   if (!ExtensionString.empty())
6269     ExtensionString.split(RequestedExtensions, '+');
6270 
6271   // FIXME This is using tablegen data, but should be moved to ARMTargetParser
6272   // once that is tablegen'ed
6273   if (!getSTI().isCPUStringValid(CPU)) {
6274     Error(CurLoc, "unknown CPU name");
6275     return false;
6276   }
6277 
6278   MCSubtargetInfo &STI = copySTI();
6279   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
6280   CurLoc = incrementLoc(CurLoc, CPU.size());
6281 
6282   ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
6283 
6284   FeatureBitset Features = STI.getFeatureBits();
6285   for (auto Name : RequestedExtensions) {
6286     // Advance source location past '+'.
6287     CurLoc = incrementLoc(CurLoc, 1);
6288 
6289     bool EnableFeature = true;
6290 
6291     if (Name.startswith_insensitive("no")) {
6292       EnableFeature = false;
6293       Name = Name.substr(2);
6294     }
6295 
6296     bool FoundExtension = false;
6297     for (const auto &Extension : ExtensionMap) {
6298       if (Extension.Name != Name)
6299         continue;
6300 
6301       if (Extension.Features.none())
6302         report_fatal_error("unsupported architectural extension: " + Name);
6303 
6304       FeatureBitset ToggleFeatures = EnableFeature
6305                                          ? (~Features & Extension.Features)
6306                                          : ( Features & Extension.Features);
6307       FeatureBitset Features =
6308           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6309       setAvailableFeatures(Features);
6310       FoundExtension = true;
6311 
6312       break;
6313     }
6314 
6315     if (!FoundExtension)
6316       Error(CurLoc, "unsupported architectural extension");
6317 
6318     CurLoc = incrementLoc(CurLoc, Name.size());
6319   }
6320   return false;
6321 }
6322 
6323 /// parseDirectiveInst
6324 ///  ::= .inst opcode [, ...]
6325 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
6326   if (getLexer().is(AsmToken::EndOfStatement))
6327     return Error(Loc, "expected expression following '.inst' directive");
6328 
6329   auto parseOp = [&]() -> bool {
6330     SMLoc L = getLoc();
6331     const MCExpr *Expr = nullptr;
6332     if (check(getParser().parseExpression(Expr), L, "expected expression"))
6333       return true;
6334     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6335     if (check(!Value, L, "expected constant expression"))
6336       return true;
6337     getTargetStreamer().emitInst(Value->getValue());
6338     return false;
6339   };
6340 
6341   return parseMany(parseOp);
6342 }
6343 
6344 // parseDirectiveTLSDescCall:
6345 //   ::= .tlsdesccall symbol
6346 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
6347   StringRef Name;
6348   if (check(getParser().parseIdentifier(Name), L,
6349             "expected symbol after directive") ||
6350       parseToken(AsmToken::EndOfStatement))
6351     return true;
6352 
6353   MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
6354   const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
6355   Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
6356 
6357   MCInst Inst;
6358   Inst.setOpcode(AArch64::TLSDESCCALL);
6359   Inst.addOperand(MCOperand::createExpr(Expr));
6360 
6361   getParser().getStreamer().emitInstruction(Inst, getSTI());
6362   return false;
6363 }
6364 
6365 /// ::= .loh <lohName | lohId> label1, ..., labelN
6366 /// The number of arguments depends on the loh identifier.
6367 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
6368   MCLOHType Kind;
6369   if (getTok().isNot(AsmToken::Identifier)) {
6370     if (getTok().isNot(AsmToken::Integer))
6371       return TokError("expected an identifier or a number in directive");
6372     // We successfully get a numeric value for the identifier.
6373     // Check if it is valid.
6374     int64_t Id = getTok().getIntVal();
6375     if (Id <= -1U && !isValidMCLOHType(Id))
6376       return TokError("invalid numeric identifier in directive");
6377     Kind = (MCLOHType)Id;
6378   } else {
6379     StringRef Name = getTok().getIdentifier();
6380     // We successfully parse an identifier.
6381     // Check if it is a recognized one.
6382     int Id = MCLOHNameToId(Name);
6383 
6384     if (Id == -1)
6385       return TokError("invalid identifier in directive");
6386     Kind = (MCLOHType)Id;
6387   }
6388   // Consume the identifier.
6389   Lex();
6390   // Get the number of arguments of this LOH.
6391   int NbArgs = MCLOHIdToNbArgs(Kind);
6392 
6393   assert(NbArgs != -1 && "Invalid number of arguments");
6394 
6395   SmallVector<MCSymbol *, 3> Args;
6396   for (int Idx = 0; Idx < NbArgs; ++Idx) {
6397     StringRef Name;
6398     if (getParser().parseIdentifier(Name))
6399       return TokError("expected identifier in directive");
6400     Args.push_back(getContext().getOrCreateSymbol(Name));
6401 
6402     if (Idx + 1 == NbArgs)
6403       break;
6404     if (parseToken(AsmToken::Comma,
6405                    "unexpected token in '" + Twine(IDVal) + "' directive"))
6406       return true;
6407   }
6408   if (parseToken(AsmToken::EndOfStatement,
6409                  "unexpected token in '" + Twine(IDVal) + "' directive"))
6410     return true;
6411 
6412   getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
6413   return false;
6414 }
6415 
/// parseDirectiveLtorg
///  ::= .ltorg | .pool
///
/// Takes no operands; flushes the constant pool accumulated so far.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  // Nothing may follow the directive on this statement.
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
6424 
6425 /// parseDirectiveReq
6426 ///  ::= name .req registername
6427 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6428   Lex(); // Eat the '.req' token.
6429   SMLoc SRegLoc = getLoc();
6430   RegKind RegisterKind = RegKind::Scalar;
6431   unsigned RegNum;
6432   OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
6433 
6434   if (ParseRes != MatchOperand_Success) {
6435     StringRef Kind;
6436     RegisterKind = RegKind::NeonVector;
6437     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
6438 
6439     if (ParseRes == MatchOperand_ParseFail)
6440       return true;
6441 
6442     if (ParseRes == MatchOperand_Success && !Kind.empty())
6443       return Error(SRegLoc, "vector register without type specifier expected");
6444   }
6445 
6446   if (ParseRes != MatchOperand_Success) {
6447     StringRef Kind;
6448     RegisterKind = RegKind::SVEDataVector;
6449     ParseRes =
6450         tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
6451 
6452     if (ParseRes == MatchOperand_ParseFail)
6453       return true;
6454 
6455     if (ParseRes == MatchOperand_Success && !Kind.empty())
6456       return Error(SRegLoc,
6457                    "sve vector register without type specifier expected");
6458   }
6459 
6460   if (ParseRes != MatchOperand_Success) {
6461     StringRef Kind;
6462     RegisterKind = RegKind::SVEPredicateVector;
6463     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
6464 
6465     if (ParseRes == MatchOperand_ParseFail)
6466       return true;
6467 
6468     if (ParseRes == MatchOperand_Success && !Kind.empty())
6469       return Error(SRegLoc,
6470                    "sve predicate register without type specifier expected");
6471   }
6472 
6473   if (ParseRes != MatchOperand_Success)
6474     return Error(SRegLoc, "register name or alias expected");
6475 
6476   // Shouldn't be anything else.
6477   if (parseToken(AsmToken::EndOfStatement,
6478                  "unexpected input in .req directive"))
6479     return true;
6480 
6481   auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
6482   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
6483     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
6484 
6485   return false;
6486 }
6487 
6488 /// parseDirectiveUneq
6489 ///  ::= .unreq registername
6490 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
6491   if (getTok().isNot(AsmToken::Identifier))
6492     return TokError("unexpected input in .unreq directive.");
6493   RegisterReqs.erase(getTok().getIdentifier().lower());
6494   Lex(); // Eat the identifier.
6495   return parseToken(AsmToken::EndOfStatement);
6496 }
6497 
/// parseDirectiveCFINegateRAState
/// ::= .cfi_negate_ra_state
///
/// Takes no operands; forwards to the streamer's CFI handling.
bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  getStreamer().emitCFINegateRAState();
  return false;
}
6504 
/// parseDirectiveCFIBKeyFrame
/// ::= .cfi_b_key
///
/// Takes no operands; forwards to the streamer's CFI handling.
bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.cfi_b_key_frame'"))
    return true;
  getStreamer().emitCFIBKeyFrame();
  return false;
}
6514 
6515 /// parseDirectiveVariantPCS
6516 /// ::= .variant_pcs symbolname
6517 bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
6518   StringRef Name;
6519   if (getParser().parseIdentifier(Name))
6520     return TokError("expected symbol name");
6521   if (parseEOL())
6522     return true;
6523   getTargetStreamer().emitDirectiveVariantPCS(
6524       getContext().getOrCreateSymbol(Name));
6525   return false;
6526 }
6527 
6528 /// parseDirectiveSEHAllocStack
6529 /// ::= .seh_stackalloc
6530 bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
6531   int64_t Size;
6532   if (parseImmExpr(Size))
6533     return true;
6534   getTargetStreamer().emitARM64WinCFIAllocStack(Size);
6535   return false;
6536 }
6537 
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
6544 
6545 /// parseDirectiveSEHSaveR19R20X
6546 /// ::= .seh_save_r19r20_x
6547 bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
6548   int64_t Offset;
6549   if (parseImmExpr(Offset))
6550     return true;
6551   getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
6552   return false;
6553 }
6554 
6555 /// parseDirectiveSEHSaveFPLR
6556 /// ::= .seh_save_fplr
6557 bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
6558   int64_t Offset;
6559   if (parseImmExpr(Offset))
6560     return true;
6561   getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
6562   return false;
6563 }
6564 
6565 /// parseDirectiveSEHSaveFPLRX
6566 /// ::= .seh_save_fplr_x
6567 bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
6568   int64_t Offset;
6569   if (parseImmExpr(Offset))
6570     return true;
6571   getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
6572   return false;
6573 }
6574 
6575 /// parseDirectiveSEHSaveReg
6576 /// ::= .seh_save_reg
6577 bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
6578   unsigned Reg;
6579   int64_t Offset;
6580   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
6581       parseComma() || parseImmExpr(Offset))
6582     return true;
6583   getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
6584   return false;
6585 }
6586 
6587 /// parseDirectiveSEHSaveRegX
6588 /// ::= .seh_save_reg_x
6589 bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
6590   unsigned Reg;
6591   int64_t Offset;
6592   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
6593       parseComma() || parseImmExpr(Offset))
6594     return true;
6595   getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
6596   return false;
6597 }
6598 
6599 /// parseDirectiveSEHSaveRegP
6600 /// ::= .seh_save_regp
6601 bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
6602   unsigned Reg;
6603   int64_t Offset;
6604   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
6605       parseComma() || parseImmExpr(Offset))
6606     return true;
6607   getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
6608   return false;
6609 }
6610 
6611 /// parseDirectiveSEHSaveRegPX
6612 /// ::= .seh_save_regp_x
6613 bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
6614   unsigned Reg;
6615   int64_t Offset;
6616   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
6617       parseComma() || parseImmExpr(Offset))
6618     return true;
6619   getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
6620   return false;
6621 }
6622 
6623 /// parseDirectiveSEHSaveLRPair
6624 /// ::= .seh_save_lrpair
6625 bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
6626   unsigned Reg;
6627   int64_t Offset;
6628   L = getLoc();
6629   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
6630       parseComma() || parseImmExpr(Offset))
6631     return true;
6632   if (check(((Reg - 19) % 2 != 0), L,
6633             "expected register with even offset from x19"))
6634     return true;
6635   getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
6636   return false;
6637 }
6638 
6639 /// parseDirectiveSEHSaveFReg
6640 /// ::= .seh_save_freg
6641 bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
6642   unsigned Reg;
6643   int64_t Offset;
6644   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
6645       parseComma() || parseImmExpr(Offset))
6646     return true;
6647   getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
6648   return false;
6649 }
6650 
6651 /// parseDirectiveSEHSaveFRegX
6652 /// ::= .seh_save_freg_x
6653 bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
6654   unsigned Reg;
6655   int64_t Offset;
6656   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
6657       parseComma() || parseImmExpr(Offset))
6658     return true;
6659   getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
6660   return false;
6661 }
6662 
6663 /// parseDirectiveSEHSaveFRegP
6664 /// ::= .seh_save_fregp
6665 bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
6666   unsigned Reg;
6667   int64_t Offset;
6668   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
6669       parseComma() || parseImmExpr(Offset))
6670     return true;
6671   getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
6672   return false;
6673 }
6674 
6675 /// parseDirectiveSEHSaveFRegPX
6676 /// ::= .seh_save_fregp_x
6677 bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
6678   unsigned Reg;
6679   int64_t Offset;
6680   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
6681       parseComma() || parseImmExpr(Offset))
6682     return true;
6683   getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
6684   return false;
6685 }
6686 
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
6693 
6694 /// parseDirectiveSEHAddFP
6695 /// ::= .seh_add_fp
6696 bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
6697   int64_t Size;
6698   if (parseImmExpr(Size))
6699     return true;
6700   getTargetStreamer().emitARM64WinCFIAddFP(Size);
6701   return false;
6702 }
6703 
/// parseDirectiveSEHNop
/// ::= .seh_nop
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
6710 
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
6717 
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
6724 
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
6731 
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
6738 
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
6745 
/// parseDirectiveSEHContext
/// ::= .seh_context
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
6752 
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
///
/// Takes no operands; forwards to the Windows unwind-info streamer.
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
6759 
6760 bool
6761 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
6762                                     AArch64MCExpr::VariantKind &ELFRefKind,
6763                                     MCSymbolRefExpr::VariantKind &DarwinRefKind,
6764                                     int64_t &Addend) {
6765   ELFRefKind = AArch64MCExpr::VK_INVALID;
6766   DarwinRefKind = MCSymbolRefExpr::VK_None;
6767   Addend = 0;
6768 
6769   if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
6770     ELFRefKind = AE->getKind();
6771     Expr = AE->getSubExpr();
6772   }
6773 
6774   const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
6775   if (SE) {
6776     // It's a simple symbol reference with no addend.
6777     DarwinRefKind = SE->getKind();
6778     return true;
6779   }
6780 
6781   // Check that it looks like a symbol + an addend
6782   MCValue Res;
6783   bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
6784   if (!Relocatable || Res.getSymB())
6785     return false;
6786 
6787   // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
6788   // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
6789   if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
6790     return false;
6791 
6792   if (Res.getSymA())
6793     DarwinRefKind = Res.getSymA()->getKind();
6794   Addend = Res.getConstant();
6795 
6796   // It's some symbol reference + a constant addend, but really
6797   // shouldn't use both Darwin and ELF syntax.
6798   return ELFRefKind == AArch64MCExpr::VK_INVALID ||
6799          DarwinRefKind == MCSymbolRefExpr::VK_None;
6800 }
6801 
6802 /// Force static initialization.
6803 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
6804   RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
6805   RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
6806   RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
6807   RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
6808   RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
6809 }
6810 
6811 #define GET_REGISTER_MATCHER
6812 #define GET_SUBTARGET_FEATURE_NAME
6813 #define GET_MATCHER_IMPLEMENTATION
6814 #define GET_MNEMONIC_SPELL_CHECKER
6815 #include "AArch64GenAsmMatcher.inc"
6816 
6817 // Define this matcher function after the auto-generated include so we
6818 // have the match class enum definitions.
6819 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
6820                                                       unsigned Kind) {
6821   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
6822   // If the kind is a token for a literal immediate, check if our asm
6823   // operand matches. This is for InstAliases which have a fixed-value
6824   // immediate in the syntax.
6825   int64_t ExpectedVal;
6826   switch (Kind) {
6827   default:
6828     return Match_InvalidOperand;
6829   case MCK__HASH_0:
6830     ExpectedVal = 0;
6831     break;
6832   case MCK__HASH_1:
6833     ExpectedVal = 1;
6834     break;
6835   case MCK__HASH_12:
6836     ExpectedVal = 12;
6837     break;
6838   case MCK__HASH_16:
6839     ExpectedVal = 16;
6840     break;
6841   case MCK__HASH_2:
6842     ExpectedVal = 2;
6843     break;
6844   case MCK__HASH_24:
6845     ExpectedVal = 24;
6846     break;
6847   case MCK__HASH_3:
6848     ExpectedVal = 3;
6849     break;
6850   case MCK__HASH_32:
6851     ExpectedVal = 32;
6852     break;
6853   case MCK__HASH_4:
6854     ExpectedVal = 4;
6855     break;
6856   case MCK__HASH_48:
6857     ExpectedVal = 48;
6858     break;
6859   case MCK__HASH_6:
6860     ExpectedVal = 6;
6861     break;
6862   case MCK__HASH_64:
6863     ExpectedVal = 64;
6864     break;
6865   case MCK__HASH_8:
6866     ExpectedVal = 8;
6867     break;
6868   case MCK_MPR:
6869     // If the Kind is a token for the MPR register class which has the "za"
6870     // register (SME accumulator array), check if the asm is a literal "za"
6871     // token. This is for the "smstart za" alias that defines the register
6872     // as a literal token.
6873     if (Op.isTokenEqual("za"))
6874       return Match_Success;
6875     break;
6876   }
6877   if (!Op.isImm())
6878     return Match_InvalidOperand;
6879   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6880   if (!CE)
6881     return Match_InvalidOperand;
6882   if (CE->getValue() == ExpectedVal)
6883     return Match_Success;
6884   return Match_InvalidOperand;
6885 }
6886 
6887 OperandMatchResultTy
6888 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
6889 
6890   SMLoc S = getLoc();
6891 
6892   if (getTok().isNot(AsmToken::Identifier)) {
6893     Error(S, "expected register");
6894     return MatchOperand_ParseFail;
6895   }
6896 
6897   unsigned FirstReg;
6898   OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
6899   if (Res != MatchOperand_Success)
6900     return MatchOperand_ParseFail;
6901 
6902   const MCRegisterClass &WRegClass =
6903       AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
6904   const MCRegisterClass &XRegClass =
6905       AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
6906 
6907   bool isXReg = XRegClass.contains(FirstReg),
6908        isWReg = WRegClass.contains(FirstReg);
6909   if (!isXReg && !isWReg) {
6910     Error(S, "expected first even register of a "
6911              "consecutive same-size even/odd register pair");
6912     return MatchOperand_ParseFail;
6913   }
6914 
6915   const MCRegisterInfo *RI = getContext().getRegisterInfo();
6916   unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
6917 
6918   if (FirstEncoding & 0x1) {
6919     Error(S, "expected first even register of a "
6920              "consecutive same-size even/odd register pair");
6921     return MatchOperand_ParseFail;
6922   }
6923 
6924   if (getTok().isNot(AsmToken::Comma)) {
6925     Error(getLoc(), "expected comma");
6926     return MatchOperand_ParseFail;
6927   }
6928   // Eat the comma
6929   Lex();
6930 
6931   SMLoc E = getLoc();
6932   unsigned SecondReg;
6933   Res = tryParseScalarRegister(SecondReg);
6934   if (Res != MatchOperand_Success)
6935     return MatchOperand_ParseFail;
6936 
6937   if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
6938       (isXReg && !XRegClass.contains(SecondReg)) ||
6939       (isWReg && !WRegClass.contains(SecondReg))) {
6940     Error(E,"expected second odd register of a "
6941              "consecutive same-size even/odd register pair");
6942     return MatchOperand_ParseFail;
6943   }
6944 
6945   unsigned Pair = 0;
6946   if (isXReg) {
6947     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
6948            &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
6949   } else {
6950     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
6951            &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
6952   }
6953 
6954   Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
6955       getLoc(), getContext()));
6956 
6957   return MatchOperand_Success;
6958 }
6959 
6960 template <bool ParseShiftExtend, bool ParseSuffix>
6961 OperandMatchResultTy
6962 AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
6963   const SMLoc S = getLoc();
6964   // Check for a SVE vector register specifier first.
6965   unsigned RegNum;
6966   StringRef Kind;
6967 
6968   OperandMatchResultTy Res =
6969       tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
6970 
6971   if (Res != MatchOperand_Success)
6972     return Res;
6973 
6974   if (ParseSuffix && Kind.empty())
6975     return MatchOperand_NoMatch;
6976 
6977   const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
6978   if (!KindRes)
6979     return MatchOperand_NoMatch;
6980 
6981   unsigned ElementWidth = KindRes->second;
6982 
6983   // No shift/extend is the default.
6984   if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
6985     Operands.push_back(AArch64Operand::CreateVectorReg(
6986         RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
6987 
6988     OperandMatchResultTy Res = tryParseVectorIndex(Operands);
6989     if (Res == MatchOperand_ParseFail)
6990       return MatchOperand_ParseFail;
6991     return MatchOperand_Success;
6992   }
6993 
6994   // Eat the comma
6995   Lex();
6996 
6997   // Match the shift
6998   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
6999   Res = tryParseOptionalShiftExtend(ExtOpnd);
7000   if (Res != MatchOperand_Success)
7001     return Res;
7002 
7003   auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
7004   Operands.push_back(AArch64Operand::CreateVectorReg(
7005       RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
7006       getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
7007       Ext->hasShiftExtendAmount()));
7008 
7009   return MatchOperand_Success;
7010 }
7011 
7012 OperandMatchResultTy
7013 AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
7014   MCAsmParser &Parser = getParser();
7015 
7016   SMLoc SS = getLoc();
7017   const AsmToken &TokE = getTok();
7018   bool IsHash = TokE.is(AsmToken::Hash);
7019 
7020   if (!IsHash && TokE.isNot(AsmToken::Identifier))
7021     return MatchOperand_NoMatch;
7022 
7023   int64_t Pattern;
7024   if (IsHash) {
7025     Lex(); // Eat hash
7026 
7027     // Parse the immediate operand.
7028     const MCExpr *ImmVal;
7029     SS = getLoc();
7030     if (Parser.parseExpression(ImmVal))
7031       return MatchOperand_ParseFail;
7032 
7033     auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
7034     if (!MCE)
7035       return MatchOperand_ParseFail;
7036 
7037     Pattern = MCE->getValue();
7038   } else {
7039     // Parse the pattern
7040     auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
7041     if (!Pat)
7042       return MatchOperand_NoMatch;
7043 
7044     Lex();
7045     Pattern = Pat->Encoding;
7046     assert(Pattern >= 0 && Pattern < 32);
7047   }
7048 
7049   Operands.push_back(
7050       AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
7051                                 SS, getLoc(), getContext()));
7052 
7053   return MatchOperand_Success;
7054 }
7055 
7056 OperandMatchResultTy
7057 AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
7058   SMLoc SS = getLoc();
7059 
7060   unsigned XReg;
7061   if (tryParseScalarRegister(XReg) != MatchOperand_Success)
7062     return MatchOperand_NoMatch;
7063 
7064   MCContext &ctx = getContext();
7065   const MCRegisterInfo *RI = ctx.getRegisterInfo();
7066   int X8Reg = RI->getMatchingSuperReg(
7067       XReg, AArch64::x8sub_0,
7068       &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
7069   if (!X8Reg) {
7070     Error(SS, "expected an even-numbered x-register in the range [x0,x22]");
7071     return MatchOperand_ParseFail;
7072   }
7073 
7074   Operands.push_back(
7075       AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
7076   return MatchOperand_Success;
7077 }
7078