1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64InstrInfo.h"
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64InstPrinter.h"
12 #include "MCTargetDesc/AArch64MCExpr.h"
13 #include "MCTargetDesc/AArch64MCTargetDesc.h"
14 #include "MCTargetDesc/AArch64TargetStreamer.h"
15 #include "TargetInfo/AArch64TargetInfo.h"
16 #include "Utils/AArch64BaseInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCLinkerOptimizationHint.h"
32 #include "llvm/MC/MCObjectFileInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
38 #include "llvm/MC/MCRegisterInfo.h"
39 #include "llvm/MC/MCStreamer.h"
40 #include "llvm/MC/MCSubtargetInfo.h"
41 #include "llvm/MC/MCSymbol.h"
42 #include "llvm/MC/MCTargetOptions.h"
43 #include "llvm/MC/MCValue.h"
44 #include "llvm/MC/TargetRegistry.h"
45 #include "llvm/Support/Casting.h"
46 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/ErrorHandling.h"
48 #include "llvm/Support/MathExtras.h"
49 #include "llvm/Support/SMLoc.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include "llvm/TargetParser/AArch64TargetParser.h"
52 #include "llvm/TargetParser/SubtargetFeature.h"
53 #include <cassert>
54 #include <cctype>
55 #include <cstdint>
56 #include <cstdio>
57 #include <optional>
58 #include <string>
59 #include <tuple>
60 #include <utility>
61 #include <vector>
62 
63 using namespace llvm;
64 
65 namespace {
66 
// The kinds of register operand this parser can recognise. The kind selects
// which register-name namespace is searched when resolving names and .req
// aliases (see matchRegisterNameAlias/RegisterReqs below).
enum class RegKind {
  Scalar,                // General-purpose / scalar FP registers.
  NeonVector,            // NEON vector registers.
  SVEDataVector,         // SVE data vector registers.
  SVEPredicateAsCounter, // SVE predicate registers used as counters.
  SVEPredicateVector,    // SVE predicate vector registers.
  Matrix,                // SME matrix registers.
  LookupTable            // SME lookup-table register operands.
};
76 
// Which portion of an SME matrix register an operand addresses.
enum class MatrixKind { Array, Tile, Row, Col };
78 
// How a parsed register is allowed to relate to the register class the
// matcher expects: it must be the exact register, or its super-/sub-register
// (e.g. when a 32-bit name is accepted where the 64-bit class is required).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
84 
/// Target assembly parser for AArch64. Parses instructions and
/// AArch64-specific directives and emits matched MCInsts to the streamer.
/// Most operand matching is produced by TableGen (the
/// AArch64GenAsmMatcher.inc includes below).
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases created via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  // Tracks a pending SVE MOVPRFX prefix so the following instruction can be
  // validated against the prefix's destination/predicate constraints.
  class PrefixInfo {
  public:
    // Build a PrefixInfo from an already-matched MCInst: active only for the
    // MOVPRFX opcodes, recording destination, and for the predicated forms
    // also the governing predicate and destructive element size (TSFlags).
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        // Merging form: the predicate is operand 2.
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        // Zeroing form: the predicate is operand 1.
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    // Only meaningful when Predicated (see the asserts above).
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  // Location of the token currently being parsed.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  // Directive parsers; each handles one assembler directive and returns true
  // on error, following the MCAsmParser convention.
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  // Windows SEH unwind directives (.seh_*).
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced from the generated matcher.
  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  // True when targeting the ILP32 (GNUILP32) environment; affects
  // relocation/expression handling.
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists; the object is owned by the streamer.
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) override;

  // Decompose a symbol-reference expression into its AArch64/Darwin
  // relocation variant kind plus a constant addend; returns false if the
  // expression is not of a recognised shape.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
336 
337 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
338 /// instruction.
339 class AArch64Operand : public MCParsedAsmOperand {
340 private:
  // Discriminator for the operand union below: exactly one union member is
  // active at a time, selected by this tag.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range spanned by this operand's tokens.
  SMLoc StartLoc, EndLoc;
364 
  // Payload for k_Token: a non-owning view of the token text.
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount; // False when the amount was implied, not written.
  };

  // Payload for k_Register.
  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };
403 
  // Payload for k_MatrixRegister.
  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // Payload for k_MatrixTileList: one bit per tile in the list.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // Payload for k_VectorList: a (possibly strided) list of vector registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned Stride;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind  RegisterKind;
  };

  // Payload for k_VectorIndex (the lane in "v0.s[2]"-style syntax).
  struct VectorIndexOp {
    int Val;
  };

  // Payload for k_Immediate.
  struct ImmOp {
    const MCExpr *Val;
  };

  // Payload for k_ShiftedImm: an immediate with a left-shift amount.
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // Payload for k_ImmRange: an inclusive immediate range First..Last.
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  // Payload for k_CondCode.
  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  // Payload for k_FPImm.
  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  // Payload for k_Barrier: name text plus encoded value.
  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // Payload for k_SysReg: name text plus the MRS/MSR/PState encodings.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // Payload for k_SysCR (system instruction Cn/Cm operand).
  struct SysCRImmOp {
    unsigned Val;
  };

  // Payload for k_Prefetch: name text plus encoded hint value.
  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // Payload for k_PSBHint: name text plus encoded hint value.
  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // Payload for k_BTIHint: name text plus encoded hint value.
  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  // Payload for k_SVCR: name text plus PState field encoding.
  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };
492 
  // Storage for the operand payload; the active member is selected by Kind.
  // All members are trivially copyable, so the copy constructor can simply
  // copy the one active member.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
518 
519 public:
  // Construct an operand of the given kind; the payload member is left
  // uninitialized and must be filled in by the Create* factory.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor: copy only the union member that is active for o.Kind.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
586 
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Accessors below assert that the matching union member is active.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstruct the APFloat from the stored 64-bit image (see FPImmOp::Val).
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }
661 
  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  // First register of a vector list; Count/Stride describe the rest.
  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }
751 
752   AArch64_AM::ShiftExtendType getShiftExtendType() const {
753     if (Kind == k_ShiftExtend)
754       return ShiftExtend.Type;
755     if (Kind == k_Register)
756       return Reg.ShiftExtend.Type;
757     llvm_unreachable("Invalid access!");
758   }
759 
760   unsigned getShiftExtendAmount() const {
761     if (Kind == k_ShiftExtend)
762       return ShiftExtend.Amount;
763     if (Kind == k_Register)
764       return Reg.ShiftExtend.Amount;
765     llvm_unreachable("Invalid access!");
766   }
767 
768   bool hasShiftExtendAmount() const {
769     if (Kind == k_ShiftExtend)
770       return ShiftExtend.HasExplicitAmount;
771     if (Kind == k_Register)
772       return Reg.ShiftExtend.HasExplicitAmount;
773     llvm_unreachable("Invalid access!");
774   }
775 
  bool isImm() const override { return Kind == k_Immediate; }
  // This operand class never represents a memory operand.
  bool isMem() const override { return false; }
778 
779   bool isUImm6() const {
780     if (!isImm())
781       return false;
782     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
783     if (!MCE)
784       return false;
785     int64_t Val = MCE->getValue();
786     return (Val >= 0 && Val < 64);
787   }
788 
  // A signed Width-bit immediate is a signed scaled immediate with scale 1.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  // Signed scaled immediate: delegates to isImmScaled with Signed = true.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }
794 
795   template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
796   DiagnosticPredicate isUImmScaled() const {
797     if (IsRange && isImmRange() &&
798         (getLastImmVal() != getFirstImmVal() + Offset))
799       return DiagnosticPredicateTy::NoMatch;
800 
801     return isImmScaled<Bits, Scale, IsRange>(false);
802   }
803 
804   template <int Bits, int Scale, bool IsRange = false>
805   DiagnosticPredicate isImmScaled(bool Signed) const {
806     if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
807         (isImmRange() && !IsRange))
808       return DiagnosticPredicateTy::NoMatch;
809 
810     int64_t Val;
811     if (isImmRange())
812       Val = getFirstImmVal();
813     else {
814       const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
815       if (!MCE)
816         return DiagnosticPredicateTy::NoMatch;
817       Val = MCE->getValue();
818     }
819 
820     int64_t MinVal, MaxVal;
821     if (Signed) {
822       int64_t Shift = Bits - 1;
823       MinVal = (int64_t(1) << Shift) * -Scale;
824       MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
825     } else {
826       MinVal = 0;
827       MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
828     }
829 
830     if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
831       return DiagnosticPredicateTy::Match;
832 
833     return DiagnosticPredicateTy::NearMatch;
834   }
835 
836   DiagnosticPredicate isSVEPattern() const {
837     if (!isImm())
838       return DiagnosticPredicateTy::NoMatch;
839     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
840     if (!MCE)
841       return DiagnosticPredicateTy::NoMatch;
842     int64_t Val = MCE->getValue();
843     if (Val >= 0 && Val < 32)
844       return DiagnosticPredicateTy::Match;
845     return DiagnosticPredicateTy::NearMatch;
846   }
847 
848   DiagnosticPredicate isSVEVecLenSpecifier() const {
849     if (!isImm())
850       return DiagnosticPredicateTy::NoMatch;
851     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
852     if (!MCE)
853       return DiagnosticPredicateTy::NoMatch;
854     int64_t Val = MCE->getValue();
855     if (Val >= 0 && Val <= 1)
856       return DiagnosticPredicateTy::Match;
857     return DiagnosticPredicateTy::NearMatch;
858   }
859 
  // Whether a symbolic (non-constant) expression is acceptable where a
  // uimm12 offset is expected, i.e. it is one of the low-12-bit page-offset
  // relocation variants listed below.
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
895 
896   template <int Scale> bool isUImm12Offset() const {
897     if (!isImm())
898       return false;
899 
900     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
901     if (!MCE)
902       return isSymbolicUImm12Offset(getImm());
903 
904     int64_t Val = MCE->getValue();
905     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
906   }
907 
908   template <int N, int M>
909   bool isImmInRange() const {
910     if (!isImm())
911       return false;
912     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
913     if (!MCE)
914       return false;
915     int64_t Val = MCE->getValue();
916     return (Val >= N && Val <= M);
917   }
918 
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  // T selects the operand width: sizeof(T) * 8 bits.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask of the bits above the operand width. Built with two half-width
    // shifts to avoid left shift by 64 directly (undefined behaviour when
    // sizeof(T) == 8).
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
938 
  // True for operands built as an (immediate, left-shift) pair.
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  // True for first:last immediate-range operands.
  bool isImmRange() const { return Kind == k_ImmRange; }
942 
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        // If the low Width bits are clear (and Val is non-zero), report the
        // value pre-shifted right by Width; otherwise report it unshifted.
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    return {};
  }
963 
964   bool isAddSubImm() const {
965     if (!isShiftedImm() && !isImm())
966       return false;
967 
968     const MCExpr *Expr;
969 
970     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
971     if (isShiftedImm()) {
972       unsigned Shift = ShiftedImm.ShiftAmount;
973       Expr = ShiftedImm.Val;
974       if (Shift != 0 && Shift != 12)
975         return false;
976     } else {
977       Expr = getImm();
978     }
979 
980     AArch64MCExpr::VariantKind ELFRefKind;
981     MCSymbolRefExpr::VariantKind DarwinRefKind;
982     int64_t Addend;
983     if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
984                                           DarwinRefKind, Addend)) {
985       return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
986           || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
987           || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
988           || ELFRefKind == AArch64MCExpr::VK_LO12
989           || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
990           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
991           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
992           || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
993           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
994           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
995           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
996           || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
997           || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
998     }
999 
1000     // If it's a constant, it should be a real immediate in range.
1001     if (auto ShiftedVal = getShiftedVal<12>())
1002       return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1003 
1004     // If it's an expression, we hope for the best and let the fixup/relocation
1005     // code deal with it.
1006     return true;
1007   }
1008 
1009   bool isAddSubImmNeg() const {
1010     if (!isShiftedImm() && !isImm())
1011       return false;
1012 
1013     // Otherwise it should be a real negative immediate in range.
1014     if (auto ShiftedVal = getShiftedVal<12>())
1015       return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1016 
1017     return false;
1018   }
1019 
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte-sized elements cannot use the shifted ('lsl #8') form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1040 
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte-sized elements cannot use the shifted ('lsl #8') form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1058 
  // Matches values encodable as a logical immediate but not as an SVE CPY
  // immediate, to disambiguate between the two alias forms.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1064 
  bool isCondCode() const { return Kind == k_CondCode; }

  // Constant encodable with the AdvSIMD modified-immediate "type 10"
  // encoding.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
1075 
  // Branch target for an instruction with an N-bit, word-scaled displacement
  // field. Non-constant expressions are accepted and left to the
  // fixup/relocation machinery.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch offsets must be word-aligned.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1089 
1090   bool
1091   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1092     if (!isImm())
1093       return false;
1094 
1095     AArch64MCExpr::VariantKind ELFRefKind;
1096     MCSymbolRefExpr::VariantKind DarwinRefKind;
1097     int64_t Addend;
1098     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1099                                              DarwinRefKind, Addend)) {
1100       return false;
1101     }
1102     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1103       return false;
1104 
1105     return llvm::is_contained(AllowedModifiers, ELFRefKind);
1106   }
1107 
  // The isMovWSymbolGn predicates accept the relocation modifiers that
  // select the n'th 16-bit chunk of a symbol's value (MOVZ/MOVK :abs_gN:
  // style modifiers and their PREL/TPREL/DTPREL/GOTTPREL variants).
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1137 
  // Immediate usable for the MOV alias of MOVZ at the given register width
  // and shift position.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }
1152 
1153   template<int RegWidth, int Shift>
1154   bool isMOVNMovAlias() const {
1155     if (!isImm()) return false;
1156 
1157     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1158     if (!CE) return false;
1159     uint64_t Value = CE->getValue();
1160 
1161     return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1162   }
1163 
  // FP value encodable in the 8-bit AArch64 FP immediate field.
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier operand without the nXS qualifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS qualifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1176 
  // System register with a valid MRS (read) encoding.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // System register with a valid MSR (write) encoding.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE field taking a 0-1 immediate in the MSR (immediate) form.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  // PSTATE field taking a 0-15 immediate in the MSR (immediate) form.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  // SVCR (streaming vector control register) operand with a valid encoding.
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1204 
  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // Plain scalar (non-vector) register.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the FPR128_lo / FPR64_lo register
  // classes.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  // NEON vector register restricted to the FPR128_0to7 register class.
  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
               Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1235 
1236   template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1237     RegKind RK;
1238     switch (Class) {
1239     case AArch64::PPRRegClassID:
1240     case AArch64::PPR_3bRegClassID:
1241     case AArch64::PPR_p8to15RegClassID:
1242     case AArch64::PNRRegClassID:
1243     case AArch64::PNR_p8to15RegClassID:
1244       RK = RegKind::SVEPredicateAsCounter;
1245       break;
1246     default:
1247       llvm_unreachable("Unsupport register class");
1248     }
1249 
1250     return (Kind == k_Register && Reg.Kind == RK) &&
1251            AArch64MCRegisterClasses[Class].contains(getReg());
1252   }
1253 
1254   template <unsigned Class> bool isSVEVectorReg() const {
1255     RegKind RK;
1256     switch (Class) {
1257     case AArch64::ZPRRegClassID:
1258     case AArch64::ZPR_3bRegClassID:
1259     case AArch64::ZPR_4bRegClassID:
1260       RK = RegKind::SVEDataVector;
1261       break;
1262     case AArch64::PPRRegClassID:
1263     case AArch64::PPR_3bRegClassID:
1264     case AArch64::PPR_p8to15RegClassID:
1265     case AArch64::PNRRegClassID:
1266     case AArch64::PNR_p8to15RegClassID:
1267       RK = RegKind::SVEPredicateVector;
1268       break;
1269     default:
1270       llvm_unreachable("Unsupport register class");
1271     }
1272 
1273     return (Kind == k_Register && Reg.Kind == RK) &&
1274            AArch64MCRegisterClasses[Class].contains(getReg());
1275   }
1276 
  // Scalar FP register used where an SVE Z register is expected (see
  // addFPRasZPRRegOperands for the conversion).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1281 
  // SVE predicate register of the given class with a matching element width;
  // wrong class/width is a near-match for better diagnostics.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // SVE predicate-as-counter register of the given class with a matching
  // element width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  // SVE data-vector (Z) register of the given class with a matching element
  // width.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1314 
  // SVE data-vector register whose attached shift/extend matches the
  // expected type and scaled amount (log2 of the access width in bytes).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1337 
  // X register used where the instruction encodes a W register (see
  // addGPR32as64Operands).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // W register used where the instruction encodes an X register (see
  // addGPR64as32Operands).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Register from the GPR64x8 (eight consecutive X registers) class.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Register from the W sequential-pair class.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Register from the X sequential-pair class.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // XZR accepted where a GPR64 pair is expected (SYSP's xzr form).
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }
1369 
1370   template<int64_t Angle, int64_t Remainder>
1371   DiagnosticPredicate isComplexRotation() const {
1372     if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1373 
1374     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1375     if (!CE) return DiagnosticPredicateTy::NoMatch;
1376     uint64_t Value = CE->getValue();
1377 
1378     if (Value % Angle == Remainder && Value <= 270)
1379       return DiagnosticPredicateTy::Match;
1380     return DiagnosticPredicateTy::NearMatch;
1381   }
1382 
  // Scalar register belonging to the given GPR64 register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // GPR64 with an attached 'lsl #log2(ExtWidth/8)' shift.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1398 
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)? An implicitly typed list has no per-element type
  /// suffix, i.e. NumElements is zero.
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1407 
1408   template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1409             unsigned ElementWidth, unsigned Stride = 1>
1410   bool isTypedVectorList() const {
1411     if (Kind != k_VectorList)
1412       return false;
1413     if (VectorList.Count != NumRegs)
1414       return false;
1415     if (VectorList.RegisterKind != VectorKind)
1416       return false;
1417     if (VectorList.ElementWidth != ElementWidth)
1418       return false;
1419     if (VectorList.Stride != Stride)
1420       return false;
1421     return VectorList.NumElements == NumElements;
1422   }
1423 
  // Typed vector list whose first register number is a multiple of NumRegs
  // (the consecutive multi-vector constraint).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }

  // Strided vector list: the first register must lie in Z0..Z(Stride-1) or
  // Z16..Z(16+Stride-1).
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1449 
1450   template <int Min, int Max>
1451   DiagnosticPredicate isVectorIndex() const {
1452     if (Kind != k_VectorIndex)
1453       return DiagnosticPredicateTy::NoMatch;
1454     if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1455       return DiagnosticPredicateTy::Match;
1456     return DiagnosticPredicateTy::NearMatch;
1457   }
1458 
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A shift proper (LSL/LSR/ASR/ROR/MSL); extends are handled by isExtend.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1478 
  // FP immediate that is bitwise-equal to the exact value named by ImmEnum
  // in the AArch64ExactFPImm table.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }

  // Matches if the immediate equals either of the two named exact values.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1511 
  // Any register-extend operand (uxtb/sxtb/.../uxtx/sxtx or lsl) with an
  // amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }

  // Extend taking a 64-bit source register: uxtx/sxtx/lsl, amount <= 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // LSL with a 3-bit shift amount (0-7).
  bool isLSLImm3Shift() const {
    if (!isShiftExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
  }
1550 
  // Extend valid for an X-register memory offset of the given access Width
  // (bits): lsl or sxtx, shifted by 0 or log2 of the access size in bytes.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Extend valid for a W-register memory offset of the given access Width
  // (bits): uxtw or sxtw, shifted by 0 or log2 of the access size in bytes.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1568 
  // 'width' is the register width; the shift amount must be strictly less.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // 'width' is the register width; the shift amount must be strictly less.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1591 
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1603 
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1615 
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1645 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1655 
1656   bool isAdrpLabel() const {
1657     // Validation was handled during parsing, so we just verify that
1658     // something didn't go haywire.
1659     if (!isImm())
1660         return false;
1661 
1662     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1663       int64_t Val = CE->getValue();
1664       int64_t Min = - (4096 * (1LL << (21 - 1)));
1665       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1666       return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1667     }
1668 
1669     return true;
1670   }
1671 
1672   bool isAdrLabel() const {
1673     // Validation was handled during parsing, so we just verify that
1674     // something didn't go haywire.
1675     if (!isImm())
1676         return false;
1677 
1678     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1679       int64_t Val = CE->getValue();
1680       int64_t Min = - (1LL << (21 - 1));
1681       int64_t Max = ((1LL << (21 - 1)) - 1);
1682       return Val >= Min && Val <= Max;
1683     }
1684 
1685     return true;
1686   }
1687 
  // Matrix (ZA) register operand of the expected kind, register class and
  // element width; a matrix operand of the wrong shape is a near-match.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1698 
  bool isPAuthPCRelLabel16Operand() const {
    // PAuth PCRel16 operands are similar to regular branch targets, but only
    // negative values are allowed for concrete immediates as signing instr
    // should be in a lower address.
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Offsets must be word-aligned.
    if (Val & 0b11)
      return false;
    // Non-positive, and representable in a 16-bit word-scaled field.
    return (Val <= 0) && (Val > -(1 << 18));
  }
1713 
  // Append Expr to Inst, folding constant expressions to plain immediate
  // operands.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }
1723 
  // Append this operand's register to Inst.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Append this operand's matrix (ZA) register to Inst.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1733 
  // The parsed operand is a GPR64; emit the GPR32 (W) register with the same
  // hardware encoding instead.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    // Translate via the encoding value so register-class ordering doesn't
    // matter.
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1745 
  // The parsed operand is a GPR32; emit the GPR64 (X) register with the same
  // hardware encoding instead. Mirror of addGPR32as64Operands.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1757 
1758   template <int Width>
1759   void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1760     unsigned Base;
1761     switch (Width) {
1762     case 8:   Base = AArch64::B0; break;
1763     case 16:  Base = AArch64::H0; break;
1764     case 32:  Base = AArch64::S0; break;
1765     case 64:  Base = AArch64::D0; break;
1766     case 128: Base = AArch64::Q0; break;
1767     default:
1768       llvm_unreachable("Unsupported width");
1769     }
1770     Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1771   }
1772 
  // The parsed operand is a predicate-as-counter register (PN); emit the
  // corresponding P register with the same index.
  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(
        MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
  }
1778 
  // The parsed operand is a Q register; emit the D register with the same
  // index (64-bit view of the same vector register).
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1785 
  // Emit the Q register as-is; the assert documents that the operand must
  // already be in the FPR128 class.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1792 
  // Emit the register as-is (range restriction is enforced by the matcher,
  // not here).
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1797 
  // Emit the register as-is (the V0-V7 restriction is checked elsewhere).
  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1802 
  // Row selector for the FirstRegs table in addVectorListOperands: which
  // register bank a vector list is drawn from.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };
1809 
  // Emit a vector-list operand (e.g. "{ v0.8b, v1.8b }") as the tuple
  // register that starts at getVectorListStart(). FirstRegs[RegTy][NumRegs]
  // is the tuple register whose list begins at register 0; column 0 holds the
  // base single register used to rebase the parsed start register.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      /* PReg */ { AArch64::P0,
                   AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    // Tuple registers of a given length are numbered consecutively, so the
    // result is FirstReg offset by the parsed start register's index.
    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1837 
  // Emit an SME2 strided vector-list operand. For 2-register lists the stride
  // is 8 (pairs Z0_Z8..Z7_Z15 and Z16_Z24..Z23_Z31); for 4-register lists the
  // stride is 4 (quads Z0_Z4_Z8_Z12.. and Z16_Z20_Z24_Z28..). The starting
  // register selects which consecutive tuple register to use.
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      if (getVectorListStart() < AArch64::Z16) {
        // Starts in Z0..Z7: tuples are based at Z0_Z8.
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        // Starts in Z16..Z23: tuples are based at Z16_Z24.
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      if (getVectorListStart() < AArch64::Z16) {
        // Starts in Z0..Z3: tuples are based at Z0_Z4_Z8_Z12.
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        // Starts in Z16..Z19: tuples are based at Z16_Z20_Z24_Z28.
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1874 
  // Emit an SME matrix tile list as an 8-bit immediate mask (one bit per ZA
  // tile).
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }
1881 
  // Emit a vector lane index (e.g. the "[2]" in "v0.s[2]") as an immediate.
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1886 
  // Emit a two-valued exact FP immediate as a 0/1 selector: 1 when the value
  // matches ImmIs1, 0 when it matches ImmIs0.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1893 
  // Emit a plain immediate operand, folding constants via addExpr.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1901 
  // Emit an immediate plus an LSL shift amount as two operands. Constants
  // that fit after normalizing by Shift are emitted pre-split; otherwise the
  // parsed (value, shift) pair or (expr, 0) is passed through unchanged.
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }
1916 
  // Emit the negation of a shifted immediate (value negated, shift kept).
  // Only reachable for operands that matched the shifted-negative predicate,
  // hence the unreachable fallback.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1926 
  // Emit the condition code as an immediate.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }
1931 
  // Emit an ADRP target: constants are converted to a page index (>> 12);
  // symbolic expressions are passed through for later fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }
1940 
  // ADR targets are byte-granular, so emit like a plain immediate.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1944 
1945   template<int Scale>
1946   void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1947     assert(N == 1 && "Invalid number of operands!");
1948     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1949 
1950     if (!MCE) {
1951       Inst.addOperand(MCOperand::createExpr(getImm()));
1952       return;
1953     }
1954     Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1955   }
1956 
  // Emit a 6-bit unsigned immediate; the operand is guaranteed constant by
  // the matcher (hence cast<> rather than dyn_cast<>).
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }
1962 
  // Emit a constant immediate scaled down by Scale to the encoded unit.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1969 
  // Emit the first value of an immediate range, scaled down by Scale; only
  // the range start is encoded.
  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
  }
1975 
  // Encode a logical (bitmask) immediate of width sizeof(T)*8 bits into the
  // N:immr:imms field format used by AND/ORR/EOR-class instructions.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // Convert through the unsigned type so the value is zero-truncated to
    // the operand width before encoding.
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1984 
  // Like addLogicalImmOperands, but encodes the bitwise complement of the
  // parsed constant (used by aliases such as BIC with inverted immediates).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1993 
  // Encode an AdvSIMD modified-immediate (type 10, the 64-bit byte-mask
  // form) into its 8-bit encoding.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
2000 
2001   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2002     // Branch operands don't encode the low bits, so shift them off
2003     // here. If it's a label, however, just put it on directly as there's
2004     // not enough information now to do anything.
2005     assert(N == 1 && "Invalid number of operands!");
2006     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2007     if (!MCE) {
2008       addExpr(Inst, getImm());
2009       return;
2010     }
2011     assert(MCE && "Invalid constant immediate operand!");
2012     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2013   }
2014 
  void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
    // PC-relative operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
2027 
2028   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2029     // Branch operands don't encode the low bits, so shift them off
2030     // here. If it's a label, however, just put it on directly as there's
2031     // not enough information now to do anything.
2032     assert(N == 1 && "Invalid number of operands!");
2033     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2034     if (!MCE) {
2035       addExpr(Inst, getImm());
2036       return;
2037     }
2038     assert(MCE && "Invalid constant immediate operand!");
2039     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2040   }
2041 
2042   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2043     // Branch operands don't encode the low bits, so shift them off
2044     // here. If it's a label, however, just put it on directly as there's
2045     // not enough information now to do anything.
2046     assert(N == 1 && "Invalid number of operands!");
2047     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2048     if (!MCE) {
2049       addExpr(Inst, getImm());
2050       return;
2051     }
2052     assert(MCE && "Invalid constant immediate operand!");
2053     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2054   }
2055 
  // Emit an FP immediate in the 8-bit FMOV encoding of its 64-bit bit
  // pattern.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
2061 
  // Emit a barrier option (DSB/DMB/ISB operand) as an immediate.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2066 
  // Emit a barrier option for the nXS variant; the encoding value is the
  // same field as the plain barrier form.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
2071 
  // Emit the system register's MRS (read) encoding.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
2077 
  // Emit the system register's MSR (write) encoding.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
2083 
  // Emit the PSTATE field encoding for MSR-immediate forms taking a 0/1
  // immediate.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2089 
  // Emit the SME SVCR (streaming/ZA control) PSTATE field encoding.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
  }
2095 
  // Emit the PSTATE field encoding for MSR-immediate forms taking a 0-15
  // immediate.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
2101 
  // Emit a system instruction Cn/Cm field as an immediate.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
2106 
  // Emit a PRFM prefetch operation as an immediate.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
2111 
  // Emit a PSB hint operand as an immediate.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
2116 
  // Emit a BTI hint operand as an immediate.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
2121 
  // Emit a shifter operand as the packed (type, amount) immediate encoding.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2128 
  // Emit only the shift amount (3-bit LSL immediate); the shift type is
  // implied by the instruction.
  void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm = getShiftExtendAmount();
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2134 
  // Emit the XZR half of a SYSP register pair. Silently adds nothing if the
  // operand isn't a scalar register; a scalar register that isn't (encoded
  // as) XZR is a matcher bug, hence unreachable.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (!isScalarReg())
      return;

    // Normalize to the GPR64 register with the same encoding (the operand
    // may have been parsed as WZR).
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
                       .getRegister(RI->getEncodingValue(getReg()));
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  }
2149 
  // Emit an arithmetic extend operand for 32-bit forms; a plain LSL is
  // canonicalized to UXTW before encoding.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2157 
  // Emit an arithmetic extend operand for 64-bit forms; a plain LSL is
  // canonicalized to UXTX before encoding.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2165 
  // Emit a memory-operand extend as two flags: signedness of the extend and
  // whether a (non-zero) shift is applied.
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }
2173 
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    // Second flag: was an amount written explicitly (even "#0")?
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
  }
2185 
2186   template<int Shift>
2187   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2188     assert(N == 1 && "Invalid number of operands!");
2189 
2190     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2191     if (CE) {
2192       uint64_t Value = CE->getValue();
2193       Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2194     } else {
2195       addExpr(Inst, getImm());
2196     }
2197   }
2198 
  // Emit a MOV-as-MOVN alias operand: the 16-bit chunk of the *complemented*
  // value selected by Shift. The operand is guaranteed constant here.
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
  }
2207 
  // Encode an even complex rotation (0/90/180/270 degrees) as value/90.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
2213 
  // Encode an odd complex rotation (90/270 degrees) as (value-90)/180,
  // i.e. 0 or 1.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
2219 
2220   void print(raw_ostream &OS) const override;
2221 
  // Build a k_Token operand referencing (not copying) Str's storage; Str must
  // outlive the operand. Start and end location are both S.
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2232 
  // Build a k_Register operand with an optional equality constraint and an
  // optional shift/extend modifier. ElementWidth starts at 0 (scalar); vector
  // creators overwrite it.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
            unsigned ShiftAmount = 0,
            unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2251 
  // Build a vector register operand: a k_Register restricted to the vector
  // register kinds, carrying its element width in bits.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2267 
  // Build a k_VectorList operand: Count registers starting at RegNum with the
  // given inter-register Stride, each with NumElements lanes of ElementWidth
  // bits.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
                   unsigned NumElements, unsigned ElementWidth,
                   RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.Stride = Stride;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2283 
  // Build a k_VectorIndex operand (a lane index such as "[3]").
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2292 
  // Build a k_MatrixTileList operand from a bitmask of ZA tiles.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
    Op->MatrixTileList.RegMask = RegMask;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2301 
2302   static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2303                                   const unsigned ElementWidth) {
2304     static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2305         RegMap = {
2306             {{0, AArch64::ZAB0},
2307              {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2308               AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2309             {{8, AArch64::ZAB0},
2310              {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2311               AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2312             {{16, AArch64::ZAH0},
2313              {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2314             {{16, AArch64::ZAH1},
2315              {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2316             {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2317             {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2318             {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2319             {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2320         };
2321 
2322     if (ElementWidth == 64)
2323       OutRegs.insert(Reg);
2324     else {
2325       std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2326       assert(!Regs.empty() && "Invalid tile or element width!");
2327       for (auto OutReg : Regs)
2328         OutRegs.insert(OutReg);
2329     }
2330   }
2331 
  // Build a k_Immediate operand wrapping an MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2340 
2341   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2342                                                           unsigned ShiftAmount,
2343                                                           SMLoc S, SMLoc E,
2344                                                           MCContext &Ctx) {
2345     auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2346     Op->ShiftedImm .Val = Val;
2347     Op->ShiftedImm.ShiftAmount = ShiftAmount;
2348     Op->StartLoc = S;
2349     Op->EndLoc = E;
2350     return Op;
2351   }
2352 
2353   static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2354                                                         unsigned Last, SMLoc S,
2355                                                         SMLoc E,
2356                                                         MCContext &Ctx) {
2357     auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2358     Op->ImmRange.First = First;
2359     Op->ImmRange.Last = Last;
2360     Op->EndLoc = E;
2361     return Op;
2362   }
2363 
  // Build a k_CondCode operand.
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2372 
  // Build a k_FPImm operand. The value is stored as the sign-extended raw bit
  // pattern of Val; IsExact records whether the parsed text converted without
  // rounding.
  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
    Op->FPImm.IsExact = IsExact;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2382 
  // Build a k_Barrier operand; Str (the spelled option name) is referenced,
  // not copied, and must outlive the operand. HasnXSModifier marks the
  // DSB-nXS variant.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx,
                                                       bool HasnXSModifier) {
    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->Barrier.HasnXSModifier = HasnXSModifier;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2397 
  // Build a k_SysReg operand carrying all three possible encodings (MRS read,
  // MSR write, PSTATE field); the add*Operands methods pick the relevant one.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2413 
  // Build a k_SysCR operand (a Cn/Cm field of a system instruction).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2422 
2423   static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2424                                                         StringRef Str,
2425                                                         SMLoc S,
2426                                                         MCContext &Ctx) {
2427     auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2428     Op->Prefetch.Val = Val;
2429     Op->Barrier.Data = Str.data();
2430     Op->Barrier.Length = Str.size();
2431     Op->StartLoc = S;
2432     Op->EndLoc = S;
2433     return Op;
2434   }
2435 
  // Build a k_PSBHint operand; Str is referenced, not copied.
  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2448 
  // Build a k_BTIHint operand; Str is referenced, not copied. Bit 5 is OR'd
  // into the value — presumably to form the full HINT-space immediate for BTI
  // (TODO confirm against the BTI hint encoding table).
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2461 
  // Build a k_MatrixRegister operand (SME ZA tile or array register) with its
  // element width and kind.
  static std::unique_ptr<AArch64Operand>
  CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
                       SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
    Op->MatrixReg.RegNum = RegNum;
    Op->MatrixReg.ElementWidth = ElementWidth;
    Op->MatrixReg.Kind = Kind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2473 
  // Build a k_SVCR operand (SME streaming/ZA control); Str is referenced,
  // not copied.
  static std::unique_ptr<AArch64Operand>
  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
    Op->SVCR.PStateField = PStateField;
    Op->SVCR.Data = Str.data();
    Op->SVCR.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2484 
2485   static std::unique_ptr<AArch64Operand>
2486   CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2487                     bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2488     auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2489     Op->ShiftExtend.Type = ShOp;
2490     Op->ShiftExtend.Amount = Val;
2491     Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2492     Op->StartLoc = S;
2493     Op->EndLoc = E;
2494     return Op;
2495   }
2496 };
2497 
2498 } // end anonymous namespace.
2499 
/// Print a human-readable description of the operand to \p OS, primarily for
/// debugging. Each operand kind gets its own "<kind ...>"-style rendering.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    // Named barriers print their name; unnamed ones print the raw encoding.
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    // Registers in the list are spaced by the list's stride.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    // Named prefetch ops print their name; unknown encodings print the value.
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Print the 8-bit tile mask most-significant bit first.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    // A register with an attached shift/extend also prints the modifier.
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2599 
2600 /// @name Auto-generated Match Functions
2601 /// {
2602 
2603 static unsigned MatchRegisterName(StringRef Name);
2604 
2605 /// }
2606 
/// Map a NEON vector register name ("v0".."v31", case-insensitive) to the
/// corresponding Q-register, or return 0 if the name does not match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2643 
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                          RegKind VectorKind) {
  // {-1, -1} is the sentinel for "not a valid suffix".
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    // NEON suffixes may spell both element count and width (".4s"), or
    // leave one or both implicit (empty suffix, ".s").
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".1d", {1, 64})
              .Case(".1q", {1, 128})
              // '.2h' needed for fp16 scalar pairwise reductions
              .Case(".2h", {2, 16})
              .Case(".2b", {2, 8})
              .Case(".2s", {2, 32})
              .Case(".2d", {2, 64})
              // '.4b' is another special case for the ARMv8.2a dot product
              // operand
              .Case(".4b", {4, 8})
              .Case(".4h", {4, 16})
              .Case(".4s", {4, 32})
              .Case(".8b", {8, 8})
              .Case(".8h", {8, 16})
              .Case(".16b", {16, 8})
              // Accept the width neutral ones, too, for verbose syntax. If
              // those aren't used in the right places, the token operand won't
              // match so all will work out.
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Default({-1, -1});
    break;
  case RegKind::SVEPredicateAsCounter:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // Scalable/matrix suffixes only ever name the element width.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return std::nullopt;

  return std::optional<std::pair<int, int>>(Res);
}
2702 
2703 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2704   return parseVectorKind(Suffix, VectorKind).has_value();
2705 }
2706 
/// Map an SVE data vector register name ("z0".."z31", case-insensitive) to
/// its Z-register, or return 0 if the name does not match.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2743 
/// Map an SVE predicate register name ("p0".."p15", case-insensitive) to its
/// P-register, or return 0 if the name does not match.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2764 
/// Map a predicate-as-counter register name ("pn0".."pn15", case-insensitive)
/// to its PN-register, or return 0 if the name does not match.
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("pn0", AArch64::PN0)
      .Case("pn1", AArch64::PN1)
      .Case("pn2", AArch64::PN2)
      .Case("pn3", AArch64::PN3)
      .Case("pn4", AArch64::PN4)
      .Case("pn5", AArch64::PN5)
      .Case("pn6", AArch64::PN6)
      .Case("pn7", AArch64::PN7)
      .Case("pn8", AArch64::PN8)
      .Case("pn9", AArch64::PN9)
      .Case("pn10", AArch64::PN10)
      .Case("pn11", AArch64::PN11)
      .Case("pn12", AArch64::PN12)
      .Case("pn13", AArch64::PN13)
      .Case("pn14", AArch64::PN14)
      .Case("pn15", AArch64::PN15)
      .Default(0);
}
2785 
/// Map an SME tile name as it may appear inside a matrix tile list
/// (case-insensitive, e.g. "za3.d") to its tile register, or return 0 if the
/// name does not match. Only whole tiles are accepted here — no slices.
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2805 
/// Map an SME matrix register name (case-insensitive) to its register, or
/// return 0 if the name does not match. Accepts the whole array ("za"), whole
/// tiles ("za3.d"), and horizontal/vertical slice spellings ("za3h.d",
/// "za3v.d"); the slice spellings resolve to the same tile registers as the
/// plain tile names.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      // Horizontal slices.
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      // Vertical slices.
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
2904 
2905 bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2906                                      SMLoc &EndLoc) {
2907   return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2908 }
2909 
2910 ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
2911                                                SMLoc &EndLoc) {
2912   StartLoc = getLoc();
2913   ParseStatus Res = tryParseScalarRegister(Reg);
2914   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2915   return Res;
2916 }
2917 
2918 // Matches a register name or register alias previously defined by '.req'
2919 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2920                                                   RegKind Kind) {
2921   unsigned RegNum = 0;
2922   if ((RegNum = matchSVEDataVectorRegName(Name)))
2923     return Kind == RegKind::SVEDataVector ? RegNum : 0;
2924 
2925   if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2926     return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2927 
2928   if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2929     return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2930 
2931   if ((RegNum = MatchNeonVectorRegName(Name)))
2932     return Kind == RegKind::NeonVector ? RegNum : 0;
2933 
2934   if ((RegNum = matchMatrixRegName(Name)))
2935     return Kind == RegKind::Matrix ? RegNum : 0;
2936 
2937  if (Name.equals_insensitive("zt0"))
2938     return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2939 
2940   // The parsed register must be of RegKind Scalar
2941   if ((RegNum = MatchRegisterName(Name)))
2942     return (Kind == RegKind::Scalar) ? RegNum : 0;
2943 
2944   if (!RegNum) {
2945     // Handle a few common aliases of registers.
2946     if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2947                     .Case("fp", AArch64::FP)
2948                     .Case("lr",  AArch64::LR)
2949                     .Case("x31", AArch64::XZR)
2950                     .Case("w31", AArch64::WZR)
2951                     .Default(0))
2952       return Kind == RegKind::Scalar ? RegNum : 0;
2953 
2954     // Check for aliases registered via .req. Canonicalize to lower case.
2955     // That's more consistent since register names are case insensitive, and
2956     // it's how the original entry was passed in from MC/MCParser/AsmParser.
2957     auto Entry = RegisterReqs.find(Name.lower());
2958     if (Entry == RegisterReqs.end())
2959       return 0;
2960 
2961     // set RegNum if the match is the right kind of register
2962     if (Kind == Entry->getValue().first)
2963       RegNum = Entry->getValue().second;
2964   }
2965   return RegNum;
2966 }
2967 
// Number of architectural registers in each register-file kind, used when
// computing vector-list wrap-around and validating register ranges.
unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
  switch (K) {
  case RegKind::Scalar:
  case RegKind::NeonVector:
  case RegKind::SVEDataVector:
    return 32;
  case RegKind::Matrix:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEPredicateAsCounter:
    return 16;
  case RegKind::LookupTable:
    // Only ZT0 exists.
    return 1;
  }
  llvm_unreachable("Unsupported RegKind");
}
2983 
2984 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2985 /// Identifier when called, and if it is a register name the token is eaten and
2986 /// the register is added to the operand list.
2987 ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
2988   const AsmToken &Tok = getTok();
2989   if (Tok.isNot(AsmToken::Identifier))
2990     return ParseStatus::NoMatch;
2991 
2992   std::string lowerCase = Tok.getString().lower();
2993   unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2994   if (Reg == 0)
2995     return ParseStatus::NoMatch;
2996 
2997   RegNum = Reg;
2998   Lex(); // Eat identifier token.
2999   return ParseStatus::Success;
3000 }
3001 
3002 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3003 ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3004   SMLoc S = getLoc();
3005 
3006   if (getTok().isNot(AsmToken::Identifier))
3007     return Error(S, "Expected cN operand where 0 <= N <= 15");
3008 
3009   StringRef Tok = getTok().getIdentifier();
3010   if (Tok[0] != 'c' && Tok[0] != 'C')
3011     return Error(S, "Expected cN operand where 0 <= N <= 15");
3012 
3013   uint32_t CRNum;
3014   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3015   if (BadNum || CRNum > 15)
3016     return Error(S, "Expected cN operand where 0 <= N <= 15");
3017 
3018   Lex(); // Eat identifier token.
3019   Operands.push_back(
3020       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3021   return ParseStatus::Success;
3022 }
3023 
// Parse an RPRFM (range prefetch) operand: either an identifier for named
// values or a 6-bit immediate, with an optional leading '#'.
ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // RPRFM encodings are 6 bits wide.
  unsigned MaxVal = 63;

  // Immediate case, with optional leading hash:
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;

    // Only constant expressions are meaningful here.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
                      "] expected");

    // Attach the canonical name when the encoding has one; otherwise keep "".
    auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
    return ParseStatus::Success;
  }

  // Named case: the identifier must be a known RPRFM name.
  if (Tok.isNot(AsmToken::Identifier))
    return TokError("prefetch hint expected");

  auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
  if (!RPRFM)
    return TokError("prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      RPRFM->Encoding, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3064 
/// tryParsePrefetch - Try to parse a prefetch operand: either a named hint or
/// an immediate (optionally '#'-prefixed). The template parameter selects
/// between the SVE prefetch table (4-bit encodings) and the regular PRFM
/// table (5-bit encodings).
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = getTok();

  // Name -> encoding, using the table picked by the template parameter.
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  };

  // Encoding -> canonical name, for pretty-printing immediates.
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  };
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;

  // Either an identifier for named values or a 5-bit immediate.
  // Eat optional hash.
  if (parseOptionalToken(AsmToken::Hash) ||
      Tok.is(AsmToken::Integer)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;

    // Only constant expressions are meaningful here.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for prefetch operand");
    unsigned prfop = MCE->getValue();
    if (prfop > MaxVal)
      return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
                      "] expected");

    // Attach the canonical name when the encoding has one; otherwise "".
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
                                                      S, getContext()));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("prefetch hint expected");

  auto PRFM = LookupByName(Tok.getString());
  if (!PRFM)
    return TokError("prefetch hint expected");

  Operands.push_back(AArch64Operand::CreatePrefetch(
      *PRFM, Tok.getString(), S, getContext()));
  Lex(); // Eat identifier token.
  return ParseStatus::Success;
}
3124 
3125 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3126 ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3127   SMLoc S = getLoc();
3128   const AsmToken &Tok = getTok();
3129   if (Tok.isNot(AsmToken::Identifier))
3130     return TokError("invalid operand for instruction");
3131 
3132   auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3133   if (!PSB)
3134     return TokError("invalid operand for instruction");
3135 
3136   Operands.push_back(AArch64Operand::CreatePSBHint(
3137       PSB->Encoding, Tok.getString(), S, getContext()));
3138   Lex(); // Eat identifier token.
3139   return ParseStatus::Success;
3140 }
3141 
// Parse the "xzr, xzr" register pair accepted by SYSP. Only the literal
// xzr pair is handled here; anything else backtracks to other matchers.
ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;

  // The case where xzr, xzr is not present is handled by an InstAlias.

  auto RegTok = getTok(); // in case we need to backtrack
  if (!tryParseScalarRegister(RegNum).isSuccess())
    return ParseStatus::NoMatch;

  // First register parsed but it isn't xzr: un-lex so another matcher can
  // try this token, and report NoMatch rather than an error.
  if (RegNum != AArch64::XZR) {
    getLexer().UnLex(RegTok);
    return ParseStatus::NoMatch;
  }

  // Past this point we are committed: failures are hard errors.
  if (parseComma())
    return ParseStatus::Failure;

  if (!tryParseScalarRegister(RegNum).isSuccess())
    return TokError("expected register operand");

  if (RegNum != AArch64::XZR)
    return TokError("xzr must be followed by xzr")；

  // We need to push something, since we claim this is an operand in .td.
  // See also AArch64AsmParser::parseKeywordOperand.
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));

  return ParseStatus::Success;
}
3174 
3175 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3176 ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3177   SMLoc S = getLoc();
3178   const AsmToken &Tok = getTok();
3179   if (Tok.isNot(AsmToken::Identifier))
3180     return TokError("invalid operand for instruction");
3181 
3182   auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3183   if (!BTI)
3184     return TokError("invalid operand for instruction");
3185 
3186   Operands.push_back(AArch64Operand::CreateBTIHint(
3187       BTI->Encoding, Tok.getString(), S, getContext()));
3188   Lex(); // Eat identifier token.
3189   return ParseStatus::Success;
3190 }
3191 
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction. Accepts a symbolic expression with an optional leading '#'
/// and restricts the relocation modifiers to the page-granular ones ADRP
/// can encode.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references cannot carry an addend.
      return Error(S, "gotpage label reference not allowed an addend");
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(S, "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return ParseStatus::Success;
}
3240 
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction. Unlike ADRP, no relocation modifier is accepted here; a bare
/// symbol is wrapped as VK_ABS.
ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // Leave anything with a bracket to the default for SVE
  if (getTok().is(AsmToken::LBrac))
    return ParseStatus::NoMatch;

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADR relocation (unfortunately).
      Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
    } else {
      // Any explicit modifier is invalid on ADR.
      return Error(S, "unexpected adr label");
    }
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
  return ParseStatus::Success;
}
3275 
/// tryParseFPImm - A floating point immediate expression operand. Accepts an
/// optional '#', optional '-', and either a decimal FP literal or a "0x.."
/// integer naming the 8-bit encoded FPImm directly. When AddFPZeroAsLiteral
/// is set, positive zero is emitted as the literal tokens "#0" ".0" instead
/// of an FPImm operand.
template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may simply be some other operand kind.
    if (!Hash)
      return ParseStatus::NoMatch;
    return TokError("invalid floating point immediate");
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
    // The hex form is the 8-bit encoded value itself; negation makes no
    // sense on an already-encoded immediate.
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError("encoded floating point value out of range");

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError()))
      return TokError("invalid floating point representation");

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Emit "#0" and ".0" as two separate tokens.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      // Record whether the conversion was exact so diagnostics can flag
      // inexact immediates later.
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Lex(); // Eat the token.

  return ParseStatus::Success;
}
3324 
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // "#N:M" is an immediate range; hand off to the dedicated parser.
  if (getTok().is(AsmToken::Integer) &&
      getLexer().peekTok().is(AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(AsmToken::Comma)) {
    // Plain immediate with no suffix at all.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  StringRef VecGroup;
  // The suffix may be a vector-group specifier (parseOptionalVGOperand
  // returns false on success); in that case emit the immediate and the
  // vector-group token as two separate operands.
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    Operands.push_back(
        AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl"))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  // The '#' before the shift amount is optional.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(getLoc(), "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return ParseStatus::Success;
}
3391 
3392 /// parseCondCodeString - Parse a Condition Code string, optionally returning a
3393 /// suggestion to help common typos.
3394 AArch64CC::CondCode
3395 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3396   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3397                     .Case("eq", AArch64CC::EQ)
3398                     .Case("ne", AArch64CC::NE)
3399                     .Case("cs", AArch64CC::HS)
3400                     .Case("hs", AArch64CC::HS)
3401                     .Case("cc", AArch64CC::LO)
3402                     .Case("lo", AArch64CC::LO)
3403                     .Case("mi", AArch64CC::MI)
3404                     .Case("pl", AArch64CC::PL)
3405                     .Case("vs", AArch64CC::VS)
3406                     .Case("vc", AArch64CC::VC)
3407                     .Case("hi", AArch64CC::HI)
3408                     .Case("ls", AArch64CC::LS)
3409                     .Case("ge", AArch64CC::GE)
3410                     .Case("lt", AArch64CC::LT)
3411                     .Case("gt", AArch64CC::GT)
3412                     .Case("le", AArch64CC::LE)
3413                     .Case("al", AArch64CC::AL)
3414                     .Case("nv", AArch64CC::NV)
3415                     .Default(AArch64CC::Invalid);
3416 
3417   if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3418     CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3419                     .Case("none",  AArch64CC::EQ)
3420                     .Case("any",   AArch64CC::NE)
3421                     .Case("nlast", AArch64CC::HS)
3422                     .Case("last",  AArch64CC::LO)
3423                     .Case("first", AArch64CC::MI)
3424                     .Case("nfrst", AArch64CC::PL)
3425                     .Case("pmore", AArch64CC::HI)
3426                     .Case("plast", AArch64CC::LS)
3427                     .Case("tcont", AArch64CC::GE)
3428                     .Case("tstop", AArch64CC::LT)
3429                     .Default(AArch64CC::Invalid);
3430 
3431     if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3432       Suggestion = "nfrst";
3433   }
3434   return CC;
3435 }
3436 
3437 /// parseCondCode - Parse a Condition Code operand.
3438 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3439                                      bool invertCondCode) {
3440   SMLoc S = getLoc();
3441   const AsmToken &Tok = getTok();
3442   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3443 
3444   StringRef Cond = Tok.getString();
3445   std::string Suggestion;
3446   AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3447   if (CC == AArch64CC::Invalid) {
3448     std::string Msg = "invalid condition code";
3449     if (!Suggestion.empty())
3450       Msg += ", did you mean " + Suggestion + "?";
3451     return TokError(Msg);
3452   }
3453   Lex(); // Eat identifier token.
3454 
3455   if (invertCondCode) {
3456     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3457       return TokError("condition codes AL and NV are invalid for this instruction");
3458     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3459   }
3460 
3461   Operands.push_back(
3462       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3463   return false;
3464 }
3465 
3466 ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3467   const AsmToken &Tok = getTok();
3468   SMLoc S = getLoc();
3469 
3470   if (Tok.isNot(AsmToken::Identifier))
3471     return TokError("invalid operand for instruction");
3472 
3473   unsigned PStateImm = -1;
3474   const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3475   if (!SVCR)
3476     return ParseStatus::NoMatch;
3477   if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3478     PStateImm = SVCR->Encoding;
3479 
3480   Operands.push_back(
3481       AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3482   Lex(); // Eat identifier token.
3483   return ParseStatus::Success;
3484 }
3485 
/// Parse an SME matrix (ZA) register operand: either the whole ZA array,
/// optionally with an element-width suffix, or an individual tile / tile
/// slice such as "za0.b" or "za1h.s".
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // Case 1: the whole ZA array, "za" or "za.<b|h|s|d>".
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    // ElementWidth 0 means no suffix was given.
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Case 2: try to parse an individual matrix (tile/slice) register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  // A matched matrix register always carries a '.<width>' suffix.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  // The character just before the dot distinguishes a horizontal ('h') or
  // vertical ('v') slice from a whole tile.
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3553 
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  // Identify the shift or extend mnemonic, case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  // The '#' before the amount is optional.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError("expected #imm after shift specifier");
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier))
    return Error(E, "expected integer shift amount");

  // The amount may be an arbitrary expression, but it must fold to a
  // constant at parse time.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return ParseStatus::Failure;

  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE)
    return Error(E, "expected constant '#imm' after shift specifier");

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return ParseStatus::Success;
}
3620 
// Table mapping architecture-extension names onto the subtarget features
// they imply.  setRequiredFeatureString scans it (with an & test, since one
// feature may belong to several extensions) to turn a missing-feature bitset
// back into user-facing extension names; presumably it is also consulted by
// the directive handling elsewhere in this file -- not visible in this chunk.
static const struct Extension {
  const char *Name;             // User-visible extension name.
  const FeatureBitset Features; // Features enabled by this extension.
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"rasv2", {AArch64::FeatureRASv2}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"predres2", {AArch64::FeatureSPECRES2}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},
    {"b16b16", {AArch64::FeatureB16B16}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},
    {"sme2p1", {AArch64::FeatureSME2p1}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},
    {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},
    {"lse128", {AArch64::FeatureLSE128}},
    {"ite", {AArch64::FeatureITE}},
    {"cssc", {AArch64::FeatureCSSC}},
    {"rcpc3", {AArch64::FeatureRCPC3}},
    {"gcs", {AArch64::FeatureGCS}},
    {"bf16", {AArch64::FeatureBF16}},
    {"compnum", {AArch64::FeatureComplxNum}},
    {"dotprod", {AArch64::FeatureDotProd}},
    {"f32mm", {AArch64::FeatureMatMulFP32}},
    {"f64mm", {AArch64::FeatureMatMulFP64}},
    {"fp16", {AArch64::FeatureFullFP16}},
    {"fp16fml", {AArch64::FeatureFP16FML}},
    {"i8mm", {AArch64::FeatureMatMulInt8}},
    {"lor", {AArch64::FeatureLOR}},
    {"profile", {AArch64::FeatureSPE}},
    // "rdma" is the name documented by binutils for the feature, but
    // binutils also accepts incomplete prefixes of features, so "rdm"
    // works too. Support both spellings here.
    {"rdm", {AArch64::FeatureRDM}},
    {"rdma", {AArch64::FeatureRDM}},
    {"sb", {AArch64::FeatureSB}},
    {"ssbs", {AArch64::FeatureSSBS}},
    {"tme", {AArch64::FeatureTME}},
    {"fpmr", {AArch64::FeatureFPMR}},
    {"fp8", {AArch64::FeatureFP8}},
    {"faminmax", {AArch64::FeatureFAMINMAX}},
    {"fp8fma", {AArch64::FeatureFP8FMA}},
    {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
    {"fp8dot2", {AArch64::FeatureFP8DOT2}},
    {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
    {"fp8dot4", {AArch64::FeatureFP8DOT4}},
    {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
    {"lut", {AArch64::FeatureLUT}},
    {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
    {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
    {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
    {"sme-fa64",  {AArch64::FeatureSMEFA64}},
    {"cpa", {AArch64::FeatureCPA}},
    {"tlbiw", {AArch64::FeatureTLBIW}},
};
3711 
// Append to Str a human-readable description of the features in FBS, for use
// in "<INSN> <op> requires: ..." diagnostics.  Architecture-version features
// are reported as a single version name; otherwise every extension from
// ExtensionMap that overlaps FBS is listed.
static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
  // NOTE: this first check is deliberately not part of the else-if chain
  // below, so "ARMv8a" can be appended in addition to a later version string.
  if (FBS[AArch64::HasV8_0aOps])
    Str += "ARMv8a";
  if (FBS[AArch64::HasV8_1aOps])
    Str += "ARMv8.1a";
  else if (FBS[AArch64::HasV8_2aOps])
    Str += "ARMv8.2a";
  else if (FBS[AArch64::HasV8_3aOps])
    Str += "ARMv8.3a";
  else if (FBS[AArch64::HasV8_4aOps])
    Str += "ARMv8.4a";
  else if (FBS[AArch64::HasV8_5aOps])
    Str += "ARMv8.5a";
  else if (FBS[AArch64::HasV8_6aOps])
    Str += "ARMv8.6a";
  else if (FBS[AArch64::HasV8_7aOps])
    Str += "ARMv8.7a";
  else if (FBS[AArch64::HasV8_8aOps])
    Str += "ARMv8.8a";
  else if (FBS[AArch64::HasV8_9aOps])
    Str += "ARMv8.9a";
  else if (FBS[AArch64::HasV9_0aOps])
    Str += "ARMv9-a";
  else if (FBS[AArch64::HasV9_1aOps])
    Str += "ARMv9.1a";
  else if (FBS[AArch64::HasV9_2aOps])
    Str += "ARMv9.2a";
  else if (FBS[AArch64::HasV9_3aOps])
    Str += "ARMv9.3a";
  else if (FBS[AArch64::HasV9_4aOps])
    Str += "ARMv9.4a";
  else if (FBS[AArch64::HasV9_5aOps])
    Str += "ARMv9.5a";
  else if (FBS[AArch64::HasV8_0rOps])
    Str += "ARMv8r";
  else {
    // Not an architecture version: list every matching extension name.
    SmallVector<std::string, 2> ExtMatches;
    for (const auto& Ext : ExtensionMap) {
      // Use & in case multiple features are enabled
      if ((FBS & Ext.Features) != FeatureBitset())
        ExtMatches.push_back(Ext.Name);
    }
    Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
  }
}
3757 
3758 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3759                                       SMLoc S) {
3760   const uint16_t Op2 = Encoding & 7;
3761   const uint16_t Cm = (Encoding & 0x78) >> 3;
3762   const uint16_t Cn = (Encoding & 0x780) >> 7;
3763   const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3764 
3765   const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3766 
3767   Operands.push_back(
3768       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3769   Operands.push_back(
3770       AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3771   Operands.push_back(
3772       AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3773   Expr = MCConstantExpr::create(Op2, getContext());
3774   Operands.push_back(
3775       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3776 }
3777 
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Looks the operation name up in the table for the given mnemonic, checks
/// the subtarget supports it, expands it to SYS operands via createSysAlias,
/// then parses the optional trailing register.  Returns true on error (with
/// a diagnostic already emitted).
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  Mnemonic = Name;
  // Every alias is emitted as the generic "sys" instruction.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      // Known operation but unavailable on this subtarget: report what it
      // would require.
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {

    // The prediction-restriction instructions accept only the "RCTX" operand.
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    // op2 selects which of the four prediction-restriction operations this is.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp"    ? 0b100
                         : Mnemonic == "dvp"  ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp"  ? 0b111
                                              : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  // Operations whose name contains "all" take no register operand; all
  // others require one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3881 
3882 /// parseSyspAlias - The TLBIP instructions are simple aliases for
3883 /// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3884 bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3885                                       OperandVector &Operands) {
3886   if (Name.contains('.'))
3887     return TokError("invalid operand");
3888 
3889   Mnemonic = Name;
3890   Operands.push_back(
3891       AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3892 
3893   const AsmToken &Tok = getTok();
3894   StringRef Op = Tok.getString();
3895   SMLoc S = Tok.getLoc();
3896 
3897   if (Mnemonic == "tlbip") {
3898     bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3899     if (HasnXSQualifier) {
3900       Op = Op.drop_back(3);
3901     }
3902     const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3903     if (!TLBIorig)
3904       return TokError("invalid operand for TLBIP instruction");
3905     const AArch64TLBI::TLBI TLBI(
3906         TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3907         TLBIorig->NeedsReg,
3908         HasnXSQualifier
3909             ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3910             : TLBIorig->FeaturesRequired);
3911     if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3912       std::string Name =
3913           std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3914       std::string Str("TLBIP " + Name + " requires: ");
3915       setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
3916       return TokError(Str);
3917     }
3918     createSysAlias(TLBI.Encoding, Operands, S);
3919   }
3920 
3921   Lex(); // Eat operand.
3922 
3923   if (parseComma())
3924     return true;
3925 
3926   if (Tok.isNot(AsmToken::Identifier))
3927     return TokError("expected register identifier");
3928   auto Result = tryParseSyspXzrPair(Operands);
3929   if (Result.isNoMatch())
3930     Result = tryParseGPRSeqPair(Operands);
3931   if (!Result.isSuccess())
3932     return TokError("specified " + Mnemonic +
3933                     " op requires a pair of registers");
3934 
3935   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3936     return true;
3937 
3938   return false;
3939 }
3940 
/// Parse a barrier operand: either an immediate in [0, 15] or a named barrier
/// option.  For "dsb", out-of-range immediates and unknown names return
/// NoMatch so the nXS-variant parser can have a go.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
    return TokError("'csync' operand expected");
  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back for the nXS
    // fallback below.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return Error(ExprLoc, "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(ExprLoc, "barrier operand out of range");
    // Attach the canonical option name, if this encoding has one.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  // Named barrier option.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError("'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError("'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError("invalid barrier option name");
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4002 
/// Parse the operand of a DSB instruction in its v8.7-A nXS form: either one
/// of the immediates 16/20/24/28 or a named nXS barrier option.
ParseStatus
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  // Only "dsb" has an nXS variant; anything else reaching here is a bug.
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    return ParseStatus::Failure;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return Error(ExprLoc, "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
      return Error(ExprLoc, "barrier operand out of range");
    // NOTE(review): DB is dereferenced unchecked; the lookup is assumed to
    // succeed for all four accepted immediates -- confirm the DBnXS table
    // covers them.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  // Named nXS barrier option.
  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB)
    return TokError("invalid barrier option name");

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
4048 
/// Parse a system-register operand for MRS/MSR, recording separate readable
/// (MRS) and writeable (MSR) encodings plus any MSR-immediate PSTATE encoding.
ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return ParseStatus::NoMatch;

  // SVCR names are handled as their own operand kind (see tryParseSVCR),
  // not as generic system registers.
  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
    return ParseStatus::NoMatch;

  // Resolve the MRS/MSR encodings; -1 marks a direction in which the
  // register cannot be used.  Names not in the table (or lacking required
  // features) fall back to parseGenericRegister -- presumably the generic
  // "S<op0>_<op1>_<Cn>_<Cm>_<op2>" spelling; confirm in AArch64SysReg.
  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  // PSTATE fields for MSR (immediate): try the Imm0_15 table first, then
  // the Imm0_1 table.  -1 (all-ones) means "not a PSTATE field".
  unsigned PStateImm = -1;
  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
  if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState15->Encoding;
  if (!PState15) {
    auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
    if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
      PStateImm = PState1->Encoding;
  }

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Lex(); // Eat identifier

  return ParseStatus::Success;
}
4083 
4084 /// tryParseNeonVectorRegister - Parse a vector register operand.
4085 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4086   if (getTok().isNot(AsmToken::Identifier))
4087     return true;
4088 
4089   SMLoc S = getLoc();
4090   // Check for a vector register specifier first.
4091   StringRef Kind;
4092   MCRegister Reg;
4093   ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4094   if (!Res.isSuccess())
4095     return true;
4096 
4097   const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4098   if (!KindRes)
4099     return true;
4100 
4101   unsigned ElementWidth = KindRes->second;
4102   Operands.push_back(
4103       AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4104                                       S, getLoc(), getContext()));
4105 
4106   // If there was an explicit qualifier, that goes on as a literal text
4107   // operand.
4108   if (!Kind.empty())
4109     Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4110 
4111   return tryParseVectorIndex(Operands).isFailure();
4112 }
4113 
4114 ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4115   SMLoc SIdx = getLoc();
4116   if (parseOptionalToken(AsmToken::LBrac)) {
4117     const MCExpr *ImmVal;
4118     if (getParser().parseExpression(ImmVal))
4119       return ParseStatus::NoMatch;
4120     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4121     if (!MCE)
4122       return TokError("immediate value expected for vector index");
4123 
4124     SMLoc E = getLoc();
4125 
4126     if (parseToken(AsmToken::RBrac, "']' expected"))
4127       return ParseStatus::Failure;
4128 
4129     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4130                                                          E, getContext()));
4131     return ParseStatus::Success;
4132   }
4133 
4134   return ParseStatus::NoMatch;
4135 }
4136 
4137 // tryParseVectorRegister - Try to parse a vector register name with
4138 // optional kind specifier. If it is a register specifier, eat the token
4139 // and return it.
4140 ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4141                                                      StringRef &Kind,
4142                                                      RegKind MatchKind) {
4143   const AsmToken &Tok = getTok();
4144 
4145   if (Tok.isNot(AsmToken::Identifier))
4146     return ParseStatus::NoMatch;
4147 
4148   StringRef Name = Tok.getString();
4149   // If there is a kind specifier, it's separated from the register name by
4150   // a '.'.
4151   size_t Start = 0, Next = Name.find('.');
4152   StringRef Head = Name.slice(Start, Next);
4153   unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4154 
4155   if (RegNum) {
4156     if (Next != StringRef::npos) {
4157       Kind = Name.slice(Next, StringRef::npos);
4158       if (!isValidVectorKind(Kind, MatchKind))
4159         return TokError("invalid vector kind qualifier");
4160     }
4161     Lex(); // Eat the register token.
4162 
4163     Reg = RegNum;
4164     return ParseStatus::Success;
4165   }
4166 
4167   return ParseStatus::NoMatch;
4168 }
4169 
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand,
/// optionally followed by a vector index (predicate-as-counter) or a further
/// operand (indexed predicate), and an optional '/z' or '/m' predication
/// qualifier. RK selects plain predicate vs. predicate-as-counter registers.
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (!Res.isSuccess())
    return Res;

  // An empty suffix is acceptable; only suffixes that don't name a known
  // element type for this register kind cause a NoMatch here.
  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      // Predicate-as-counter may carry a "[i]" vector index.
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(S, "not expecting size suffix");

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(getLoc(), "expecting 'z' predication");

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(getLoc(), "expecting 'm' or 'z' predication");

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4232 
4233 /// parseRegister - Parse a register operand.
4234 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4235   // Try for a Neon vector register.
4236   if (!tryParseNeonVectorRegister(Operands))
4237     return false;
4238 
4239   if (tryParseZTOperand(Operands).isSuccess())
4240     return false;
4241 
4242   // Otherwise try for a scalar register.
4243   if (tryParseGPROperand<false>(Operands).isSuccess())
4244     return false;
4245 
4246   return true;
4247 }
4248 
/// parseSymbolicImmVal - Parse an immediate expression that may be prefixed
/// by an ELF-style relocation specifier of the form ":<spec>:", e.g.
/// ":lo12:symbol". On success ImmVal holds the parsed expression, wrapped in
/// an AArch64MCExpr when a specifier was present. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively by lowering first.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation specifier survives into the MC
  // layer.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
4326 
/// tryParseMatrixTileList - Parse an SME matrix tile list operand such as
/// "{ za0.d, za1.d }", including the "{}" (empty) and "{za}" (all tiles)
/// aliases. The list is encoded as a bitmask over the ZAD tiles aliased by
/// each listed register.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parse a single "<tile>.<size>" element; both the register name and the
  // element-width suffix are required.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes)
      return TokError(
          "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return ParseStatus::Failure;

    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Put the '{' back so other list-operand parsers can have a go.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the set of ZAD tiles aliased by every listed tile.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(TileLoc, "mismatched register size suffix");

    // Out-of-order and duplicate tiles are accepted with a warning only.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  // Encode the aliased ZAD tiles as a bitmask relative to ZAD0.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return ParseStatus::Success;
}
4432 
/// tryParseVectorList - Parse a vector register list, either the range form
/// "{ v0.8b - v3.8b }" or the comma-separated form "{ v0.8b, v1.8b }" (which
/// also admits a constant stride between consecutive registers). When
/// ExpectMatch is set, failing to find a vector register after '{' is a hard
/// error rather than NoMatch.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" lists are handled by another parser; report NoMatch quietly.
    if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive("zt0"))
      return ParseStatus::NoMatch;

    if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive("za")))
      return Error(Loc, "vector register expected");

    return ParseStatus::NoMatch;
  };

  int NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  int Stride = 1;
  if (parseOptionalToken(AsmToken::Minus)) {
    // Range form: the distance between the two endpoints (with wraparound at
    // the last register) gives the register count.
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);

    if (Space == 0 || Space > 3)
      return Error(Loc, "invalid number of vectors");

    Count += Space;
  }
  else {
    // Comma-separated form: the stride between the first two registers fixes
    // the stride required of the rest of the list.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (RegVal + NumRegs - PrevRegVal);
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(Loc, "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  // An empty Kind (no element-type suffix) leaves both values at zero.
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  return ParseStatus::Success;
}
4556 
4557 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4558 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4559   auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4560   if (!ParseRes.isSuccess())
4561     return true;
4562 
4563   return tryParseVectorIndex(Operands).isFailure();
4564 }
4565 
/// tryParseGPR64sp0Operand - Parse a scalar register operand that may be
/// followed by ", #0". Any index other than an absent one or a literal zero
/// is rejected with an error.
ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  ParseStatus Res = tryParseScalarRegister(RegNum);
  if (!Res.isSuccess())
    return Res;

  // No comma: the register stands alone.
  if (!parseOptionalToken(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // An optional '#' may precede the zero immediate.
  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer))
    return Error(getLoc(), "index must be absent or #0");

  // The index, if present, must evaluate to the constant zero.
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
      cast<MCConstantExpr>(ImmVal)->getValue() != 0)
    return Error(getLoc(), "index must be absent or #0");

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
  return ParseStatus::Success;
}
4594 
/// tryParseZTOperand - Parse a lookup-table (ZT) register operand, optionally
/// followed by an indexed form "[<imm>]" that may itself carry a
/// ", mul vl" / ", mul #<imm>" decoration.
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  std::string Name = Tok.getString().lower();

  unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (RegNum == 0)
    return ParseStatus::NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat register.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    // The brackets become literal token operands around the index.
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::NoMatch;
    // Only constant indices are accepted.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");
    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
    // An optional "mul ..." decoration may follow the index.
    if (parseOptionalToken(AsmToken::Comma))
      if (parseOptionalMulOperand(Operands))
        return ParseStatus::Failure;
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return ParseStatus::Failure;
    Operands.push_back(
        AArch64Operand::CreateToken("]", getLoc(), getContext()));
  }
  return ParseStatus::Success;
}
4632 
/// tryParseGPROperand - Parse a scalar (general-purpose) register operand.
/// When ParseShiftExtend is set, an optional ", <shift/extend>" modifier is
/// folded into the register operand itself. EqTy describes the equality
/// constraint applied when this register is matched against a tied operand.
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  MCRegister RegNum;
  ParseStatus Res = tryParseScalarRegister(RegNum);
  if (!Res.isSuccess())
    return Res;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into a single register operand.
  auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
4666 
/// parseOptionalMulOperand - Parse the optional "mul vl" or "mul #<imm>"
/// decoration used by SVE operands. Returns false if one was parsed; returns
/// true otherwise, reporting an error only when a "mul" keyword was consumed
/// but not followed by a valid 'vl' or immediate.
bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  // Peek past "mul" before committing so an unrelated identifier is left
  // untouched for other parsers.
  bool NextIsVL =
      Parser.getLexer().peekTok().getString().equals_insensitive("vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
  if (!getTok().getString().equals_insensitive("mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      AArch64Operand::CreateToken("mul", getLoc(), getContext()));
  Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        AArch64Operand::CreateToken("vl", getLoc(), getContext()));
    Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
            getContext()));
        return false;
      }
  }

  // "mul" was consumed but no valid suffix followed.
  return Error(getLoc(), "expected 'vl' or '#<imm>'");
}
4708 
4709 bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4710                                               StringRef &VecGroup) {
4711   MCAsmParser &Parser = getParser();
4712   auto Tok = Parser.getTok();
4713   if (Tok.isNot(AsmToken::Identifier))
4714     return true;
4715 
4716   StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4717                      .Case("vgx2", "vgx2")
4718                      .Case("vgx4", "vgx4")
4719                      .Default("");
4720 
4721   if (VG.empty())
4722     return true;
4723 
4724   VecGroup = VG;
4725   Parser.Lex(); // Eat vgx[2|4]
4726   return false;
4727 }
4728 
4729 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4730   auto Tok = getTok();
4731   if (Tok.isNot(AsmToken::Identifier))
4732     return true;
4733 
4734   auto Keyword = Tok.getString();
4735   Keyword = StringSwitch<StringRef>(Keyword.lower())
4736                 .Case("sm", "sm")
4737                 .Case("za", "za")
4738                 .Default(Keyword);
4739   Operands.push_back(
4740       AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4741 
4742   Lex();
4743   return false;
4744 }
4745 
4746 /// parseOperand - Parse a arm instruction operand.  For now this parses the
4747 /// operand regardless of the mnemonic.
4748 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4749                                   bool invertCondCode) {
4750   MCAsmParser &Parser = getParser();
4751 
4752   ParseStatus ResTy =
4753       MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4754 
4755   // Check if the current operand has a custom associated parser, if so, try to
4756   // custom parse the operand, or fallback to the general approach.
4757   if (ResTy.isSuccess())
4758     return false;
4759   // If there wasn't a custom match, try the generic matcher below. Otherwise,
4760   // there was a match, but an error occurred, in which case, just return that
4761   // the operand parsing failed.
4762   if (ResTy.isFailure())
4763     return true;
4764 
4765   // Nothing custom, so do general case parsing.
4766   SMLoc S, E;
4767   switch (getLexer().getKind()) {
4768   default: {
4769     SMLoc S = getLoc();
4770     const MCExpr *Expr;
4771     if (parseSymbolicImmVal(Expr))
4772       return Error(S, "invalid operand");
4773 
4774     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4775     Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4776     return false;
4777   }
4778   case AsmToken::LBrac: {
4779     Operands.push_back(
4780         AArch64Operand::CreateToken("[", getLoc(), getContext()));
4781     Lex(); // Eat '['
4782 
4783     // There's no comma after a '[', so we can parse the next operand
4784     // immediately.
4785     return parseOperand(Operands, false, false);
4786   }
4787   case AsmToken::LCurly: {
4788     if (!parseNeonVectorList(Operands))
4789       return false;
4790 
4791     Operands.push_back(
4792         AArch64Operand::CreateToken("{", getLoc(), getContext()));
4793     Lex(); // Eat '{'
4794 
4795     // There's no comma after a '{', so we can parse the next operand
4796     // immediately.
4797     return parseOperand(Operands, false, false);
4798   }
4799   case AsmToken::Identifier: {
4800     // See if this is a "VG" decoration used by SME instructions.
4801     StringRef VecGroup;
4802     if (!parseOptionalVGOperand(Operands, VecGroup)) {
4803       Operands.push_back(
4804           AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4805       return false;
4806     }
4807     // If we're expecting a Condition Code operand, then just parse that.
4808     if (isCondCode)
4809       return parseCondCode(Operands, invertCondCode);
4810 
4811     // If it's a register name, parse it.
4812     if (!parseRegister(Operands))
4813       return false;
4814 
4815     // See if this is a "mul vl" decoration or "mul #<int>" operand used
4816     // by SVE instructions.
4817     if (!parseOptionalMulOperand(Operands))
4818       return false;
4819 
4820     // This could be an optional "shift" or "extend" operand.
4821     ParseStatus GotShift = tryParseOptionalShiftExtend(Operands);
4822     // We can only continue if no tokens were eaten.
4823     if (!GotShift.isNoMatch())
4824       return GotShift.isFailure();
4825 
4826     // If this is a two-word mnemonic, parse its special keyword
4827     // operand as an identifier.
4828     if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
4829         Mnemonic == "gcsb")
4830       return parseKeywordOperand(Operands);
4831 
4832     // This was not a register so parse other operands that start with an
4833     // identifier (like labels) as expressions and create them as immediates.
4834     const MCExpr *IdVal;
4835     S = getLoc();
4836     if (getParser().parseExpression(IdVal))
4837       return true;
4838     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4839     Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4840     return false;
4841   }
4842   case AsmToken::Integer:
4843   case AsmToken::Real:
4844   case AsmToken::Hash: {
4845     // #42 -> immediate.
4846     S = getLoc();
4847 
4848     parseOptionalToken(AsmToken::Hash);
4849 
4850     // Parse a negative sign
4851     bool isNegative = false;
4852     if (getTok().is(AsmToken::Minus)) {
4853       isNegative = true;
4854       // We need to consume this token only when we have a Real, otherwise
4855       // we let parseSymbolicImmVal take care of it
4856       if (Parser.getLexer().peekTok().is(AsmToken::Real))
4857         Lex();
4858     }
4859 
4860     // The only Real that should come through here is a literal #0.0 for
4861     // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
4862     // so convert the value.
4863     const AsmToken &Tok = getTok();
4864     if (Tok.is(AsmToken::Real)) {
4865       APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
4866       uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4867       if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
4868           Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
4869           Mnemonic != "fcmlt" && Mnemonic != "fcmne")
4870         return TokError("unexpected floating point literal");
4871       else if (IntVal != 0 || isNegative)
4872         return TokError("expected floating-point constant #0.0");
4873       Lex(); // Eat the token.
4874 
4875       Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
4876       Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
4877       return false;
4878     }
4879 
4880     const MCExpr *ImmVal;
4881     if (parseSymbolicImmVal(ImmVal))
4882       return true;
4883 
4884     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4885     Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
4886     return false;
4887   }
4888   case AsmToken::Equal: {
4889     SMLoc Loc = getLoc();
4890     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
4891       return TokError("unexpected token in operand");
4892     Lex(); // Eat '='
4893     const MCExpr *SubExprVal;
4894     if (getParser().parseExpression(SubExprVal))
4895       return true;
4896 
4897     if (Operands.size() < 2 ||
4898         !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
4899       return Error(Loc, "Only valid when first operand is register");
4900 
4901     bool IsXReg =
4902         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4903             Operands[1]->getReg());
4904 
4905     MCContext& Ctx = getContext();
4906     E = SMLoc::getFromPointer(Loc.getPointer() - 1);
4907     // If the op is an imm and can be fit into a mov, then replace ldr with mov.
4908     if (isa<MCConstantExpr>(SubExprVal)) {
4909       uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4910       uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4911       while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
4912         ShiftAmt += 16;
4913         Imm >>= 16;
4914       }
4915       if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
4916         Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
4917         Operands.push_back(AArch64Operand::CreateImm(
4918             MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
4919         if (ShiftAmt)
4920           Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
4921                      ShiftAmt, true, S, E, Ctx));
4922         return false;
4923       }
4924       APInt Simm = APInt(64, Imm << ShiftAmt);
4925       // check if the immediate is an unsigned or signed 32-bit int for W regs
4926       if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
4927         return Error(Loc, "Immediate too large for register");
4928     }
4929     // If it is a label or an imm that cannot fit in a movz, put it into CP.
4930     const MCExpr *CPLoc =
4931         getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4932     Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
4933     return false;
4934   }
4935   }
4936 }
4937 
4938 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4939   const MCExpr *Expr = nullptr;
4940   SMLoc L = getLoc();
4941   if (check(getParser().parseExpression(Expr), L, "expected expression"))
4942     return true;
4943   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4944   if (check(!Value, L, "expected constant expression"))
4945     return true;
4946   Out = Value->getValue();
4947   return false;
4948 }
4949 
4950 bool AArch64AsmParser::parseComma() {
4951   if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4952     return true;
4953   // Eat the comma
4954   Lex();
4955   return false;
4956 }
4957 
4958 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4959                                             unsigned First, unsigned Last) {
4960   MCRegister Reg;
4961   SMLoc Start, End;
4962   if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
4963     return true;
4964 
4965   // Special handling for FP and LR; they aren't linearly after x28 in
4966   // the registers enum.
4967   unsigned RangeEnd = Last;
4968   if (Base == AArch64::X0) {
4969     if (Last == AArch64::FP) {
4970       RangeEnd = AArch64::X28;
4971       if (Reg == AArch64::FP) {
4972         Out = 29;
4973         return false;
4974       }
4975     }
4976     if (Last == AArch64::LR) {
4977       RangeEnd = AArch64::X28;
4978       if (Reg == AArch64::FP) {
4979         Out = 29;
4980         return false;
4981       } else if (Reg == AArch64::LR) {
4982         Out = 30;
4983         return false;
4984       }
4985     }
4986   }
4987 
4988   if (check(Reg < First || Reg > RangeEnd, Start,
4989             Twine("expected register in range ") +
4990                 AArch64InstPrinter::getRegisterName(First) + " to " +
4991                 AArch64InstPrinter::getRegisterName(Last)))
4992     return true;
4993   Out = Reg - Base;
4994   return false;
4995 }
4996 
4997 bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
4998                                     const MCParsedAsmOperand &Op2) const {
4999   auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5000   auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5001 
5002   if (AOp1.isVectorList() && AOp2.isVectorList())
5003     return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5004            AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5005            AOp1.getVectorListStride() == AOp2.getVectorListStride();
5006 
5007   if (!AOp1.isReg() || !AOp2.isReg())
5008     return false;
5009 
5010   if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5011       AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5012     return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5013 
5014   assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5015          "Testing equality of non-scalar registers not supported");
5016 
5017   // Check if a registers match their sub/super register classes.
5018   if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5019     return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5020   if (AOp1.getRegEqualityTy() == EqualsSubReg)
5021     return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5022   if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5023     return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5024   if (AOp2.getRegEqualityTy() == EqualsSubReg)
5025     return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5026 
5027   return false;
5028 }
5029 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Returns true on error (a diagnostic has already been emitted);
/// on success, Operands holds the mnemonic token(s) followed by the parsed
/// operands.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the legacy "b<cond>" spellings to the architectural
  // "b.<cond>" form so the condition-code suffix handling below only has to
  // deal with one syntax.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next); // Mnemonic up to the first '.'.

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
    return parseSysAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next); // Condition code, without the '.'.

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      // parseCondCodeString may have proposed a close legal spelling.
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next); // Includes the leading '.'.
    // The +1 positions the suffix location past the '.' separator.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1; // 1-based operand position, used for cond-code slots.
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5178 
5179 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
5180   assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5181   return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5182          (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5183          (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5184          (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5185          (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5186          (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5187 }
5188 
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Returns true (after emitting a diagnostic) when the already-matched Inst is
// an unpredictable or otherwise invalid combination of operands. Loc holds
// the source locations of Inst's operands, used to point diagnostics at the
// offending operand.
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // A prefix only applies to the instruction following it.  Here we extract
  // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erroneously continue using the
  // current prefix.
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  // Before validating the instruction in isolation we run through the rules
  // applicable when it follows a prefix instruction.
  // NOTE: brk & hlt can be prefixed but require no additional validation.
  if (Prefix.isActive() &&
      (Inst.getOpcode() != AArch64::BRK) &&
      (Inst.getOpcode() != AArch64::HLT)) {

    // Prefixed instructions must have a destructive operand.
    if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
        AArch64::NotDestructive)
      return Error(IDLoc, "instruction is unpredictable when following a"
                   " movprfx, suggest replacing movprfx with mov");

    // Destination operands must match.
    if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
      return Error(Loc[0], "instruction is unpredictable when following a"
                   " movprfx writing to a different destination");

    // Destination operand must not be used in any other location.
    for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
      if (Inst.getOperand(i).isReg() &&
          (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
          isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
        return Error(Loc[0], "instruction is unpredictable when following a"
                     " movprfx and destination also used as non-destructive"
                     " source");
    }

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      int PgIdx = -1;

      // Find the instructions general predicate.
      for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
        if (Inst.getOperand(i).isReg() &&
            PPRRegClass.contains(Inst.getOperand(i).getReg())) {
          PgIdx = i;
          break;
        }

      // Instruction must be predicated if the movprfx is predicated.
      if (PgIdx == -1 ||
          (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx, suggest using unpredicated movprfx");

      // Instruction must use same general predicate as the movprfx.
      if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx using a different general predicate");

      // Instruction element type must match the movprfx.
      if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
        return Error(IDLoc, "instruction is unpredictable when following a"
                     " predicated movprfx with a different element size");
    }
  }

  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback LDP operands: (0) writeback base, (1) Rt, (2) Rt2, (3) Rn.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // NOTE(review): this falls through into the LDR_ZA/STR_ZA case below.
    // For these LDP opcodes operand 2 is a register, so the isImm() guard
    // there makes the fallthrough a no-op; it also means Rt==Rt2 is never
    // diagnosed for these writeback variants -- confirm that is intended.
    [[fallthrough]];
  }
  case AArch64::LDR_ZA:
  case AArch64::STR_ZA: {
    // The vector-select immediate and the memory offset immediate must agree.
    if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
        Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
      return Error(Loc[1],
                   "unpredictable instruction, immediate and offset mismatch.");
    break;
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback LDP: operands (0) Rt, (1) Rt2.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // FP-register writeback LDP (plus LDPSWpost): operands (1) Rt, (2) Rt2.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Writeback single-register loads: base must not alias the destination.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Writeback single-register stores: base must not alias the data register.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STXRB:
  case AArch64::STXRH:
  case AArch64::STXRW:
  case AArch64::STXRX:
  case AArch64::STLXRB:
  case AArch64::STLXRH:
  case AArch64::STLXRW:
  case AArch64::STLXRX: {
    // Exclusive stores: status register must not alias the data or base
    // register (base aliasing SP is allowed).
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rt, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXR instruction, status is also a source");
    break;
  }
  case AArch64::STXPW:
  case AArch64::STXPX:
  case AArch64::STLXPW:
  case AArch64::STLXPX: {
    unsigned Rs = Inst.getOperand(0).getReg();
    unsigned Rt1 = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
        (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
      return Error(Loc[0],
                   "unpredictable STXP instruction, status is also a source");
    break;
  }
  case AArch64::LDRABwriteback:
  case AArch64::LDRAAwriteback: {
    // Pointer-authenticated loads with writeback: Xt must differ from Xn.
    unsigned Xt = Inst.getOperand(0).getReg();
    unsigned Xn = Inst.getOperand(1).getReg();
    if (Xt == Xn)
      return Error(Loc[0],
          "unpredictable LDRA instruction, writeback base"
          " is also a destination");
    break;
  }
  }

  // Check v8.8-A memops instructions.
  switch (Inst.getOpcode()) {
  case AArch64::CPYFP:
  case AArch64::CPYFPWN:
  case AArch64::CPYFPRN:
  case AArch64::CPYFPN:
  case AArch64::CPYFPWT:
  case AArch64::CPYFPWTWN:
  case AArch64::CPYFPWTRN:
  case AArch64::CPYFPWTN:
  case AArch64::CPYFPRT:
  case AArch64::CPYFPRTWN:
  case AArch64::CPYFPRTRN:
  case AArch64::CPYFPRTN:
  case AArch64::CPYFPT:
  case AArch64::CPYFPTWN:
  case AArch64::CPYFPTRN:
  case AArch64::CPYFPTN:
  case AArch64::CPYFM:
  case AArch64::CPYFMWN:
  case AArch64::CPYFMRN:
  case AArch64::CPYFMN:
  case AArch64::CPYFMWT:
  case AArch64::CPYFMWTWN:
  case AArch64::CPYFMWTRN:
  case AArch64::CPYFMWTN:
  case AArch64::CPYFMRT:
  case AArch64::CPYFMRTWN:
  case AArch64::CPYFMRTRN:
  case AArch64::CPYFMRTN:
  case AArch64::CPYFMT:
  case AArch64::CPYFMTWN:
  case AArch64::CPYFMTRN:
  case AArch64::CPYFMTN:
  case AArch64::CPYFE:
  case AArch64::CPYFEWN:
  case AArch64::CPYFERN:
  case AArch64::CPYFEN:
  case AArch64::CPYFEWT:
  case AArch64::CPYFEWTWN:
  case AArch64::CPYFEWTRN:
  case AArch64::CPYFEWTN:
  case AArch64::CPYFERT:
  case AArch64::CPYFERTWN:
  case AArch64::CPYFERTRN:
  case AArch64::CPYFERTN:
  case AArch64::CPYFET:
  case AArch64::CPYFETWN:
  case AArch64::CPYFETRN:
  case AArch64::CPYFETN:
  case AArch64::CPYP:
  case AArch64::CPYPWN:
  case AArch64::CPYPRN:
  case AArch64::CPYPN:
  case AArch64::CPYPWT:
  case AArch64::CPYPWTWN:
  case AArch64::CPYPWTRN:
  case AArch64::CPYPWTN:
  case AArch64::CPYPRT:
  case AArch64::CPYPRTWN:
  case AArch64::CPYPRTRN:
  case AArch64::CPYPRTN:
  case AArch64::CPYPT:
  case AArch64::CPYPTWN:
  case AArch64::CPYPTRN:
  case AArch64::CPYPTN:
  case AArch64::CPYM:
  case AArch64::CPYMWN:
  case AArch64::CPYMRN:
  case AArch64::CPYMN:
  case AArch64::CPYMWT:
  case AArch64::CPYMWTWN:
  case AArch64::CPYMWTRN:
  case AArch64::CPYMWTN:
  case AArch64::CPYMRT:
  case AArch64::CPYMRTWN:
  case AArch64::CPYMRTRN:
  case AArch64::CPYMRTN:
  case AArch64::CPYMT:
  case AArch64::CPYMTWN:
  case AArch64::CPYMTRN:
  case AArch64::CPYMTN:
  case AArch64::CPYE:
  case AArch64::CPYEWN:
  case AArch64::CPYERN:
  case AArch64::CPYEN:
  case AArch64::CPYEWT:
  case AArch64::CPYEWTWN:
  case AArch64::CPYEWTRN:
  case AArch64::CPYEWTN:
  case AArch64::CPYERT:
  case AArch64::CPYERTWN:
  case AArch64::CPYERTRN:
  case AArch64::CPYERTN:
  case AArch64::CPYET:
  case AArch64::CPYETWN:
  case AArch64::CPYETRN:
  case AArch64::CPYETN: {
    // CPY* operand layout: (0..2) writeback copies of Xd/Xs/Xn, then
    // (3) Xd destination, (4) Xs source, (5) Xn size. The writeback copies
    // are tied and must match, and all three registers must be distinct.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xs_wb = Inst.getOperand(1).getReg();
    unsigned Xn_wb = Inst.getOperand(2).getReg();
    unsigned Xd = Inst.getOperand(3).getReg();
    unsigned Xs = Inst.getOperand(4).getReg();
    unsigned Xn = Inst.getOperand(5).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid CPY instruction, Xd_wb and Xd do not match");
    if (Xs_wb != Xs)
      return Error(Loc[0],
                   "invalid CPY instruction, Xs_wb and Xs do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid CPY instruction, Xn_wb and Xn do not match");
    if (Xd == Xs)
      return Error(Loc[0], "invalid CPY instruction, destination and source"
                           " registers are the same");
    if (Xd == Xn)
      return Error(Loc[0], "invalid CPY instruction, destination and size"
                           " registers are the same");
    if (Xs == Xn)
      return Error(Loc[0], "invalid CPY instruction, source and size"
                           " registers are the same");
    break;
  }
  case AArch64::SETP:
  case AArch64::SETPT:
  case AArch64::SETPN:
  case AArch64::SETPTN:
  case AArch64::SETM:
  case AArch64::SETMT:
  case AArch64::SETMN:
  case AArch64::SETMTN:
  case AArch64::SETE:
  case AArch64::SETET:
  case AArch64::SETEN:
  case AArch64::SETETN:
  case AArch64::SETGP:
  case AArch64::SETGPT:
  case AArch64::SETGPN:
  case AArch64::SETGPTN:
  case AArch64::SETGM:
  case AArch64::SETGMT:
  case AArch64::SETGMN:
  case AArch64::SETGMTN:
  case AArch64::MOPSSETGE:
  case AArch64::MOPSSETGET:
  case AArch64::MOPSSETGEN:
  case AArch64::MOPSSETGETN: {
    // SET* operand layout: (0..1) writeback copies of Xd/Xn, then
    // (2) Xd destination, (3) Xn size, (4) Xm source value. The writeback
    // copies must match and all three registers must be distinct.
    unsigned Xd_wb = Inst.getOperand(0).getReg();
    unsigned Xn_wb = Inst.getOperand(1).getReg();
    unsigned Xd = Inst.getOperand(2).getReg();
    unsigned Xn = Inst.getOperand(3).getReg();
    unsigned Xm = Inst.getOperand(4).getReg();
    if (Xd_wb != Xd)
      return Error(Loc[0],
                   "invalid SET instruction, Xd_wb and Xd do not match");
    if (Xn_wb != Xn)
      return Error(Loc[0],
                   "invalid SET instruction, Xn_wb and Xn do not match");
    if (Xd == Xn)
      return Error(Loc[0], "invalid SET instruction, destination and size"
                           " registers are the same");
    if (Xd == Xm)
      return Error(Loc[0], "invalid SET instruction, destination and source"
                           " registers are the same");
    if (Xn == Xm)
      return Error(Loc[0], "invalid SET instruction, source and size"
                           " registers are the same");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {

        // Only allow these with ADDXri.
        if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
             DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
            Inst.getOpcode() == AArch64::ADDXri)
          return false;

        // Only allow these with ADDXri/ADDWri
        if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
             ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
             ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
            (Inst.getOpcode() == AArch64::ADDXri ||
             Inst.getOpcode() == AArch64::ADDWri))
          return false;

        // Don't allow symbol refs in the immediate field otherwise
        // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
        // operands of the original instruction (i.e. 'add w0, w1, borked' vs
        // 'cmp w0, 'borked')
        return Error(Loc.back(), "invalid immediate expression");
      }
      // We don't validate more complex expressions here
    }
    return false;
  }
  default:
    return false;
  }
}
5655 
// Suggest a close, valid mnemonic for an unrecognized one, filtered by the
// available feature bits. Declaration only — the definition is presumably
// provided by the TableGen'erated matcher included later in this file
// (TODO confirm); used below to augment "unrecognized instruction mnemonic"
// diagnostics.
static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);
5659 
// Emit a human-readable diagnostic for a failed instruction match.
// \p Loc is the location the diagnostic points at, \p ErrCode is the Match_*
// code produced by the matcher, \p ErrorInfo is an operand index (only
// meaningful for some codes, e.g. Match_InvalidTiedOperand), and \p Operands
// are the parsed operands. Always returns true, as Error() does.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // ErrorInfo indexes the operand that violated the tied-register
    // constraint; tailor the message to the kind of constraint it carries.
    auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
    if (Op.isVectorList())
      return Error(Loc, "operand must match destination register list");

    assert(Op.isReg() && "Unexpected operand type");
    switch (Op.getRegEqualityTy()) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Immediate-offset addressing diagnostics. The "multiple of N in range
  // [lo, hi]" messages describe byte offsets of scaled addressing forms.
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed8UImm3:
    return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  // Register-offset addressing: extend/shift modifier diagnostics for
  // 32-bit (W) and 64-bit (X) index registers.
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate-operand range diagnostics.
  case Match_InvalidImm0_0:
    return Error(Loc, "immediate must be 0.");
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_3:
    return Error(Loc, "immediate must be an integer in range [0, 3].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // SME vector-select offsets written as an immediate range <immf>:<imml>.
  case Match_InvalidMemoryIndexedRange2UImm0:
    return Error(Loc, "vector select offset must be the immediate range 0:1.");
  case Match_InvalidMemoryIndexedRange2UImm1:
    return Error(Loc, "vector select offset must be an immediate range of the "
                      "form <immf>:<imml>, where the first "
                      "immediate is a multiple of 2 in the range [0, 2], and "
                      "the second immediate is immf + 1.");
  case Match_InvalidMemoryIndexedRange2UImm2:
  case Match_InvalidMemoryIndexedRange2UImm3:
    return Error(
        Loc,
        "vector select offset must be an immediate range of the form "
        "<immf>:<imml>, "
        "where the first immediate is a multiple of 2 in the range [0, 6] or "
        "[0, 14] "
        "depending on the instruction, and the second immediate is immf + 1.");
  case Match_InvalidMemoryIndexedRange4UImm0:
    return Error(Loc, "vector select offset must be the immediate range 0:3.");
  case Match_InvalidMemoryIndexedRange4UImm1:
  case Match_InvalidMemoryIndexedRange4UImm2:
    return Error(
        Loc,
        "vector select offset must be an immediate range of the form "
        "<immf>:<imml>, "
        "where the first immediate is a multiple of 4 in the range [0, 4] or "
        "[0, 12] "
        "depending on the instruction, and the second immediate is immf + 3.");
  // SVE add/sub/cpy immediates: an 8-bit value optionally shifted left by 8,
  // hence the "multiple of 256" alternative for the wider element sizes.
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  // Vector lane-index diagnostics.
  case Match_InvalidIndexRange0_0:
    return Error(Loc, "expected lane specifier '[0]'");
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
  case Match_InvalidSVCR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Augment the diagnostic with a "did you mean" suggestion when the
    // spell checker finds a close valid mnemonic for the enabled features.
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  // Scaled register-operand diagnostics for GPR64 with a required shift.
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64shifted128:
    return Error(
        Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted128:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
  // SVE vector-plus-vector addressing: shift/extend modifier diagnostics.
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  // Restricted SVE vector registers (low 8 or 16 Z registers only).
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b8:
    return Error(Loc,
                 "Invalid restricted vector register, expected z0.b..z15.b");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  // SVE predicate and predicate-as-counter register diagnostics.
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPNPredicateB_p8to15Reg:
  case Match_InvalidSVEPNPredicateH_p8to15Reg:
  case Match_InvalidSVEPNPredicateS_p8to15Reg:
  case Match_InvalidSVEPNPredicateD_p8to15Reg:
    return Error(Loc, "Invalid predicate register, expected PN in range "
                      "pn8..pn15 with element suffix.");
  case Match_InvalidSVEPNPredicateAny_p8to15Reg:
    return Error(Loc, "invalid restricted predicate-as-counter register "
                      "expected pn8..pn15");
  case Match_InvalidSVEPNPredicateBReg:
  case Match_InvalidSVEPNPredicateHReg:
  case Match_InvalidSVEPNPredicateSReg:
  case Match_InvalidSVEPNPredicateDReg:
    return Error(Loc, "Invalid predicate register, expected PN in range "
                      "pn0..pn15 with element suffix.");
  case Match_InvalidSVEVecLenSpecifier:
    return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
  case Match_InvalidSVEPredicateListMul2x8:
  case Match_InvalidSVEPredicateListMul2x16:
  case Match_InvalidSVEPredicateListMul2x32:
  case Match_InvalidSVEPredicateListMul2x64:
    return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
                      "predicate registers, where the first vector is a multiple of 2 "
                      "and with correct element type");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  // SME matrix (ZA) operand diagnostics.
  case Match_InvalidMatrixTileVectorH8:
  case Match_InvalidMatrixTileVectorV8:
    return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
  case Match_InvalidMatrixTileVectorH16:
  case Match_InvalidMatrixTileVectorV16:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
  case Match_InvalidMatrixTileVectorH32:
  case Match_InvalidMatrixTileVectorV32:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
  case Match_InvalidMatrixTileVectorH64:
  case Match_InvalidMatrixTileVectorV64:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
  case Match_InvalidMatrixTileVectorH128:
  case Match_InvalidMatrixTileVectorV128:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
  case Match_InvalidMatrixTile32:
    return Error(Loc, "invalid matrix operand, expected za[0-3].s")
  case Match_InvalidMatrixTile64:
    return Error(Loc, "invalid matrix operand, expected za[0-7].d");
  case Match_InvalidMatrix:
    return Error(Loc, "invalid matrix operand, expected za");
  case Match_InvalidMatrix8:
    return Error(Loc, "invalid matrix operand, expected suffix .b");
  case Match_InvalidMatrix16:
    return Error(Loc, "invalid matrix operand, expected suffix .h");
  case Match_InvalidMatrix32:
    return Error(Loc, "invalid matrix operand, expected suffix .s");
  case Match_InvalidMatrix64:
    return Error(Loc, "invalid matrix operand, expected suffix .d");
  case Match_InvalidMatrixIndexGPR32_12_15:
    return Error(Loc, "operand must be a register in range [w12, w15]");
  case Match_InvalidMatrixIndexGPR32_8_11:
    return Error(Loc, "operand must be a register in range [w8, w11]");
  // SVE multi-vector list diagnostics (consecutive and strided forms).
  case Match_InvalidSVEVectorListMul2x8:
  case Match_InvalidSVEVectorListMul2x16:
  case Match_InvalidSVEVectorListMul2x32:
  case Match_InvalidSVEVectorListMul2x64:
    return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
                      "SVE vectors, where the first vector is a multiple of 2 "
                      "and with matching element types");
  case Match_InvalidSVEVectorListMul4x8:
  case Match_InvalidSVEVectorListMul4x16:
  case Match_InvalidSVEVectorListMul4x32:
  case Match_InvalidSVEVectorListMul4x64:
    return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
                      "SVE vectors, where the first vector is a multiple of 4 "
                      "and with matching element types");
  case Match_InvalidLookupTable:
    return Error(Loc, "Invalid lookup table, expected zt0");
  case Match_InvalidSVEVectorListStrided2x8:
  case Match_InvalidSVEVectorListStrided2x16:
  case Match_InvalidSVEVectorListStrided2x32:
  case Match_InvalidSVEVectorListStrided2x64:
    return Error(
        Loc,
        "Invalid vector list, expected list with each SVE vector in the list "
        "8 registers apart, and the first register in the range [z0, z7] or "
        "[z16, z23] and with correct element type");
  case Match_InvalidSVEVectorListStrided4x8:
  case Match_InvalidSVEVectorListStrided4x16:
  case Match_InvalidSVEVectorListStrided4x32:
  case Match_InvalidSVEVectorListStrided4x64:
    return Error(
        Loc,
        "Invalid vector list, expected list with each SVE vector in the list "
        "4 registers apart, and the first register in the range [z0, z3] or "
        "[z16, z19] and with correct element type");
  case Match_AddSubLSLImm3ShiftLarge:
    return Error(Loc,
      "expected 'lsl' with optional integer in range [0, 7]");
  default:
    // Every Match_* code the matcher can produce must be handled above.
    llvm_unreachable("unexpected error code!");
  }
}
6117 
6118 static const char *getSubtargetFeatureName(uint64_t Val);
6119 
6120 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6121                                                OperandVector &Operands,
6122                                                MCStreamer &Out,
6123                                                uint64_t &ErrorInfo,
6124                                                bool MatchingInlineAsm) {
6125   assert(!Operands.empty() && "Unexpect empty operand list!");
6126   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6127   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6128 
6129   StringRef Tok = Op.getToken();
6130   unsigned NumOperands = Operands.size();
6131 
6132   if (NumOperands == 4 && Tok == "lsl") {
6133     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6134     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6135     if (Op2.isScalarReg() && Op3.isImm()) {
6136       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6137       if (Op3CE) {
6138         uint64_t Op3Val = Op3CE->getValue();
6139         uint64_t NewOp3Val = 0;
6140         uint64_t NewOp4Val = 0;
6141         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6142                 Op2.getReg())) {
6143           NewOp3Val = (32 - Op3Val) & 0x1f;
6144           NewOp4Val = 31 - Op3Val;
6145         } else {
6146           NewOp3Val = (64 - Op3Val) & 0x3f;
6147           NewOp4Val = 63 - Op3Val;
6148         }
6149 
6150         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6151         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6152 
6153         Operands[0] =
6154             AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6155         Operands.push_back(AArch64Operand::CreateImm(
6156             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6157         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6158                                                 Op3.getEndLoc(), getContext());
6159       }
6160     }
6161   } else if (NumOperands == 4 && Tok == "bfc") {
6162     // FIXME: Horrible hack to handle BFC->BFM alias.
6163     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6164     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6165     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6166 
6167     if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6168       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6169       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6170 
6171       if (LSBCE && WidthCE) {
6172         uint64_t LSB = LSBCE->getValue();
6173         uint64_t Width = WidthCE->getValue();
6174 
6175         uint64_t RegWidth = 0;
6176         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6177                 Op1.getReg()))
6178           RegWidth = 64;
6179         else
6180           RegWidth = 32;
6181 
6182         if (LSB >= RegWidth)
6183           return Error(LSBOp.getStartLoc(),
6184                        "expected integer in range [0, 31]");
6185         if (Width < 1 || Width > RegWidth)
6186           return Error(WidthOp.getStartLoc(),
6187                        "expected integer in range [1, 32]");
6188 
6189         uint64_t ImmR = 0;
6190         if (RegWidth == 32)
6191           ImmR = (32 - LSB) & 0x1f;
6192         else
6193           ImmR = (64 - LSB) & 0x3f;
6194 
6195         uint64_t ImmS = Width - 1;
6196 
6197         if (ImmR != 0 && ImmS >= ImmR)
6198           return Error(WidthOp.getStartLoc(),
6199                        "requested insert overflows register");
6200 
6201         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6202         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6203         Operands[0] =
6204             AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6205         Operands[2] = AArch64Operand::CreateReg(
6206             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6207             SMLoc(), SMLoc(), getContext());
6208         Operands[3] = AArch64Operand::CreateImm(
6209             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6210         Operands.emplace_back(
6211             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6212                                       WidthOp.getEndLoc(), getContext()));
6213       }
6214     }
6215   } else if (NumOperands == 5) {
6216     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6217     // UBFIZ -> UBFM aliases.
6218     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6219       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6220       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6221       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6222 
6223       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6224         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6225         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6226 
6227         if (Op3CE && Op4CE) {
6228           uint64_t Op3Val = Op3CE->getValue();
6229           uint64_t Op4Val = Op4CE->getValue();
6230 
6231           uint64_t RegWidth = 0;
6232           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6233                   Op1.getReg()))
6234             RegWidth = 64;
6235           else
6236             RegWidth = 32;
6237 
6238           if (Op3Val >= RegWidth)
6239             return Error(Op3.getStartLoc(),
6240                          "expected integer in range [0, 31]");
6241           if (Op4Val < 1 || Op4Val > RegWidth)
6242             return Error(Op4.getStartLoc(),
6243                          "expected integer in range [1, 32]");
6244 
6245           uint64_t NewOp3Val = 0;
6246           if (RegWidth == 32)
6247             NewOp3Val = (32 - Op3Val) & 0x1f;
6248           else
6249             NewOp3Val = (64 - Op3Val) & 0x3f;
6250 
6251           uint64_t NewOp4Val = Op4Val - 1;
6252 
6253           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6254             return Error(Op4.getStartLoc(),
6255                          "requested insert overflows register");
6256 
6257           const MCExpr *NewOp3 =
6258               MCConstantExpr::create(NewOp3Val, getContext());
6259           const MCExpr *NewOp4 =
6260               MCConstantExpr::create(NewOp4Val, getContext());
6261           Operands[3] = AArch64Operand::CreateImm(
6262               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6263           Operands[4] = AArch64Operand::CreateImm(
6264               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6265           if (Tok == "bfi")
6266             Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6267                                                       getContext());
6268           else if (Tok == "sbfiz")
6269             Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6270                                                       getContext());
6271           else if (Tok == "ubfiz")
6272             Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6273                                                       getContext());
6274           else
6275             llvm_unreachable("No valid mnemonic for alias?");
6276         }
6277       }
6278 
6279       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6280       // UBFX -> UBFM aliases.
6281     } else if (NumOperands == 5 &&
6282                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6283       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6284       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6285       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6286 
6287       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6288         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6289         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6290 
6291         if (Op3CE && Op4CE) {
6292           uint64_t Op3Val = Op3CE->getValue();
6293           uint64_t Op4Val = Op4CE->getValue();
6294 
6295           uint64_t RegWidth = 0;
6296           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6297                   Op1.getReg()))
6298             RegWidth = 64;
6299           else
6300             RegWidth = 32;
6301 
6302           if (Op3Val >= RegWidth)
6303             return Error(Op3.getStartLoc(),
6304                          "expected integer in range [0, 31]");
6305           if (Op4Val < 1 || Op4Val > RegWidth)
6306             return Error(Op4.getStartLoc(),
6307                          "expected integer in range [1, 32]");
6308 
6309           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6310 
6311           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6312             return Error(Op4.getStartLoc(),
6313                          "requested extract overflows register");
6314 
6315           const MCExpr *NewOp4 =
6316               MCConstantExpr::create(NewOp4Val, getContext());
6317           Operands[4] = AArch64Operand::CreateImm(
6318               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6319           if (Tok == "bfxil")
6320             Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6321                                                       getContext());
6322           else if (Tok == "sbfx")
6323             Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6324                                                       getContext());
6325           else if (Tok == "ubfx")
6326             Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6327                                                       getContext());
6328           else
6329             llvm_unreachable("No valid mnemonic for alias?");
6330         }
6331       }
6332     }
6333   }
6334 
6335   // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6336   // instruction for FP registers correctly in some rare circumstances. Convert
6337   // it to a safe instruction and warn (because silently changing someone's
6338   // assembly is rude).
6339   if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6340       NumOperands == 4 && Tok == "movi") {
6341     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6342     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6343     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6344     if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6345         (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6346       StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6347       if (Suffix.lower() == ".2d" &&
6348           cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6349         Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6350                 " correctly on this CPU, converting to equivalent movi.16b");
6351         // Switch the suffix to .16b.
6352         unsigned Idx = Op1.isToken() ? 1 : 2;
6353         Operands[Idx] =
6354             AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6355       }
6356     }
6357   }
6358 
6359   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6360   //        InstAlias can't quite handle this since the reg classes aren't
6361   //        subclasses.
6362   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6363     // The source register can be Wn here, but the matcher expects a
6364     // GPR64. Twiddle it here if necessary.
6365     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6366     if (Op.isScalarReg()) {
6367       unsigned Reg = getXRegFromWReg(Op.getReg());
6368       Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6369                                               Op.getStartLoc(), Op.getEndLoc(),
6370                                               getContext());
6371     }
6372   }
6373   // FIXME: Likewise for sxt[bh] with a Xd dst operand
6374   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6375     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6376     if (Op.isScalarReg() &&
6377         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6378             Op.getReg())) {
6379       // The source register can be Wn here, but the matcher expects a
6380       // GPR64. Twiddle it here if necessary.
6381       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6382       if (Op.isScalarReg()) {
6383         unsigned Reg = getXRegFromWReg(Op.getReg());
6384         Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6385                                                 Op.getStartLoc(),
6386                                                 Op.getEndLoc(), getContext());
6387       }
6388     }
6389   }
6390   // FIXME: Likewise for uxt[bh] with a Xd dst operand
6391   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6392     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6393     if (Op.isScalarReg() &&
6394         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6395             Op.getReg())) {
6396       // The source register can be Wn here, but the matcher expects a
6397       // GPR32. Twiddle it here if necessary.
6398       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6399       if (Op.isScalarReg()) {
6400         unsigned Reg = getWRegFromXReg(Op.getReg());
6401         Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6402                                                 Op.getStartLoc(),
6403                                                 Op.getEndLoc(), getContext());
6404       }
6405     }
6406   }
6407 
6408   MCInst Inst;
6409   FeatureBitset MissingFeatures;
6410   // First try to match against the secondary set of tables containing the
6411   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6412   unsigned MatchResult =
6413       MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6414                            MatchingInlineAsm, 1);
6415 
6416   // If that fails, try against the alternate table containing long-form NEON:
6417   // "fadd v0.2s, v1.2s, v2.2s"
6418   if (MatchResult != Match_Success) {
6419     // But first, save the short-form match result: we can use it in case the
6420     // long-form match also fails.
6421     auto ShortFormNEONErrorInfo = ErrorInfo;
6422     auto ShortFormNEONMatchResult = MatchResult;
6423     auto ShortFormNEONMissingFeatures = MissingFeatures;
6424 
6425     MatchResult =
6426         MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6427                              MatchingInlineAsm, 0);
6428 
6429     // Now, both matches failed, and the long-form match failed on the mnemonic
6430     // suffix token operand.  The short-form match failure is probably more
6431     // relevant: use it instead.
6432     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6433         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6434         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6435       MatchResult = ShortFormNEONMatchResult;
6436       ErrorInfo = ShortFormNEONErrorInfo;
6437       MissingFeatures = ShortFormNEONMissingFeatures;
6438     }
6439   }
6440 
6441   switch (MatchResult) {
6442   case Match_Success: {
6443     // Perform range checking and other semantic validations
6444     SmallVector<SMLoc, 8> OperandLocs;
6445     NumOperands = Operands.size();
6446     for (unsigned i = 1; i < NumOperands; ++i)
6447       OperandLocs.push_back(Operands[i]->getStartLoc());
6448     if (validateInstruction(Inst, IDLoc, OperandLocs))
6449       return true;
6450 
6451     Inst.setLoc(IDLoc);
6452     Out.emitInstruction(Inst, getSTI());
6453     return false;
6454   }
6455   case Match_MissingFeature: {
6456     assert(MissingFeatures.any() && "Unknown missing feature!");
6457     // Special case the error message for the very common case where only
6458     // a single subtarget feature is missing (neon, e.g.).
6459     std::string Msg = "instruction requires:";
6460     for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6461       if (MissingFeatures[i]) {
6462         Msg += " ";
6463         Msg += getSubtargetFeatureName(i);
6464       }
6465     }
6466     return Error(IDLoc, Msg);
6467   }
6468   case Match_MnemonicFail:
6469     return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6470   case Match_InvalidOperand: {
6471     SMLoc ErrorLoc = IDLoc;
6472 
6473     if (ErrorInfo != ~0ULL) {
6474       if (ErrorInfo >= Operands.size())
6475         return Error(IDLoc, "too few operands for instruction",
6476                      SMRange(IDLoc, getTok().getLoc()));
6477 
6478       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6479       if (ErrorLoc == SMLoc())
6480         ErrorLoc = IDLoc;
6481     }
6482     // If the match failed on a suffix token operand, tweak the diagnostic
6483     // accordingly.
6484     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6485         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6486       MatchResult = Match_InvalidSuffix;
6487 
6488     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6489   }
6490   case Match_InvalidTiedOperand:
6491   case Match_InvalidMemoryIndexed1:
6492   case Match_InvalidMemoryIndexed2:
6493   case Match_InvalidMemoryIndexed4:
6494   case Match_InvalidMemoryIndexed8:
6495   case Match_InvalidMemoryIndexed16:
6496   case Match_InvalidCondCode:
6497   case Match_AddSubLSLImm3ShiftLarge:
6498   case Match_AddSubRegExtendSmall:
6499   case Match_AddSubRegExtendLarge:
6500   case Match_AddSubSecondSource:
6501   case Match_LogicalSecondSource:
6502   case Match_AddSubRegShift32:
6503   case Match_AddSubRegShift64:
6504   case Match_InvalidMovImm32Shift:
6505   case Match_InvalidMovImm64Shift:
6506   case Match_InvalidFPImm:
6507   case Match_InvalidMemoryWExtend8:
6508   case Match_InvalidMemoryWExtend16:
6509   case Match_InvalidMemoryWExtend32:
6510   case Match_InvalidMemoryWExtend64:
6511   case Match_InvalidMemoryWExtend128:
6512   case Match_InvalidMemoryXExtend8:
6513   case Match_InvalidMemoryXExtend16:
6514   case Match_InvalidMemoryXExtend32:
6515   case Match_InvalidMemoryXExtend64:
6516   case Match_InvalidMemoryXExtend128:
6517   case Match_InvalidMemoryIndexed1SImm4:
6518   case Match_InvalidMemoryIndexed2SImm4:
6519   case Match_InvalidMemoryIndexed3SImm4:
6520   case Match_InvalidMemoryIndexed4SImm4:
6521   case Match_InvalidMemoryIndexed1SImm6:
6522   case Match_InvalidMemoryIndexed16SImm4:
6523   case Match_InvalidMemoryIndexed32SImm4:
6524   case Match_InvalidMemoryIndexed4SImm7:
6525   case Match_InvalidMemoryIndexed8SImm7:
6526   case Match_InvalidMemoryIndexed16SImm7:
6527   case Match_InvalidMemoryIndexed8UImm5:
6528   case Match_InvalidMemoryIndexed8UImm3:
6529   case Match_InvalidMemoryIndexed4UImm5:
6530   case Match_InvalidMemoryIndexed2UImm5:
6531   case Match_InvalidMemoryIndexed1UImm6:
6532   case Match_InvalidMemoryIndexed2UImm6:
6533   case Match_InvalidMemoryIndexed4UImm6:
6534   case Match_InvalidMemoryIndexed8UImm6:
6535   case Match_InvalidMemoryIndexed16UImm6:
6536   case Match_InvalidMemoryIndexedSImm6:
6537   case Match_InvalidMemoryIndexedSImm5:
6538   case Match_InvalidMemoryIndexedSImm8:
6539   case Match_InvalidMemoryIndexedSImm9:
6540   case Match_InvalidMemoryIndexed16SImm9:
6541   case Match_InvalidMemoryIndexed8SImm10:
6542   case Match_InvalidImm0_0:
6543   case Match_InvalidImm0_1:
6544   case Match_InvalidImm0_3:
6545   case Match_InvalidImm0_7:
6546   case Match_InvalidImm0_15:
6547   case Match_InvalidImm0_31:
6548   case Match_InvalidImm0_63:
6549   case Match_InvalidImm0_127:
6550   case Match_InvalidImm0_255:
6551   case Match_InvalidImm0_65535:
6552   case Match_InvalidImm1_8:
6553   case Match_InvalidImm1_16:
6554   case Match_InvalidImm1_32:
6555   case Match_InvalidImm1_64:
6556   case Match_InvalidMemoryIndexedRange2UImm0:
6557   case Match_InvalidMemoryIndexedRange2UImm1:
6558   case Match_InvalidMemoryIndexedRange2UImm2:
6559   case Match_InvalidMemoryIndexedRange2UImm3:
6560   case Match_InvalidMemoryIndexedRange4UImm0:
6561   case Match_InvalidMemoryIndexedRange4UImm1:
6562   case Match_InvalidMemoryIndexedRange4UImm2:
6563   case Match_InvalidSVEAddSubImm8:
6564   case Match_InvalidSVEAddSubImm16:
6565   case Match_InvalidSVEAddSubImm32:
6566   case Match_InvalidSVEAddSubImm64:
6567   case Match_InvalidSVECpyImm8:
6568   case Match_InvalidSVECpyImm16:
6569   case Match_InvalidSVECpyImm32:
6570   case Match_InvalidSVECpyImm64:
6571   case Match_InvalidIndexRange0_0:
6572   case Match_InvalidIndexRange1_1:
6573   case Match_InvalidIndexRange0_15:
6574   case Match_InvalidIndexRange0_7:
6575   case Match_InvalidIndexRange0_3:
6576   case Match_InvalidIndexRange0_1:
6577   case Match_InvalidSVEIndexRange0_63:
6578   case Match_InvalidSVEIndexRange0_31:
6579   case Match_InvalidSVEIndexRange0_15:
6580   case Match_InvalidSVEIndexRange0_7:
6581   case Match_InvalidSVEIndexRange0_3:
6582   case Match_InvalidLabel:
6583   case Match_InvalidComplexRotationEven:
6584   case Match_InvalidComplexRotationOdd:
6585   case Match_InvalidGPR64shifted8:
6586   case Match_InvalidGPR64shifted16:
6587   case Match_InvalidGPR64shifted32:
6588   case Match_InvalidGPR64shifted64:
6589   case Match_InvalidGPR64shifted128:
6590   case Match_InvalidGPR64NoXZRshifted8:
6591   case Match_InvalidGPR64NoXZRshifted16:
6592   case Match_InvalidGPR64NoXZRshifted32:
6593   case Match_InvalidGPR64NoXZRshifted64:
6594   case Match_InvalidGPR64NoXZRshifted128:
6595   case Match_InvalidZPR32UXTW8:
6596   case Match_InvalidZPR32UXTW16:
6597   case Match_InvalidZPR32UXTW32:
6598   case Match_InvalidZPR32UXTW64:
6599   case Match_InvalidZPR32SXTW8:
6600   case Match_InvalidZPR32SXTW16:
6601   case Match_InvalidZPR32SXTW32:
6602   case Match_InvalidZPR32SXTW64:
6603   case Match_InvalidZPR64UXTW8:
6604   case Match_InvalidZPR64SXTW8:
6605   case Match_InvalidZPR64UXTW16:
6606   case Match_InvalidZPR64SXTW16:
6607   case Match_InvalidZPR64UXTW32:
6608   case Match_InvalidZPR64SXTW32:
6609   case Match_InvalidZPR64UXTW64:
6610   case Match_InvalidZPR64SXTW64:
6611   case Match_InvalidZPR32LSL8:
6612   case Match_InvalidZPR32LSL16:
6613   case Match_InvalidZPR32LSL32:
6614   case Match_InvalidZPR32LSL64:
6615   case Match_InvalidZPR64LSL8:
6616   case Match_InvalidZPR64LSL16:
6617   case Match_InvalidZPR64LSL32:
6618   case Match_InvalidZPR64LSL64:
6619   case Match_InvalidZPR0:
6620   case Match_InvalidZPR8:
6621   case Match_InvalidZPR16:
6622   case Match_InvalidZPR32:
6623   case Match_InvalidZPR64:
6624   case Match_InvalidZPR128:
6625   case Match_InvalidZPR_3b8:
6626   case Match_InvalidZPR_3b16:
6627   case Match_InvalidZPR_3b32:
6628   case Match_InvalidZPR_4b8:
6629   case Match_InvalidZPR_4b16:
6630   case Match_InvalidZPR_4b32:
6631   case Match_InvalidZPR_4b64:
6632   case Match_InvalidSVEPredicateAnyReg:
6633   case Match_InvalidSVEPattern:
6634   case Match_InvalidSVEVecLenSpecifier:
6635   case Match_InvalidSVEPredicateBReg:
6636   case Match_InvalidSVEPredicateHReg:
6637   case Match_InvalidSVEPredicateSReg:
6638   case Match_InvalidSVEPredicateDReg:
6639   case Match_InvalidSVEPredicate3bAnyReg:
6640   case Match_InvalidSVEPNPredicateB_p8to15Reg:
6641   case Match_InvalidSVEPNPredicateH_p8to15Reg:
6642   case Match_InvalidSVEPNPredicateS_p8to15Reg:
6643   case Match_InvalidSVEPNPredicateD_p8to15Reg:
6644   case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6645   case Match_InvalidSVEPNPredicateBReg:
6646   case Match_InvalidSVEPNPredicateHReg:
6647   case Match_InvalidSVEPNPredicateSReg:
6648   case Match_InvalidSVEPNPredicateDReg:
6649   case Match_InvalidSVEPredicateListMul2x8:
6650   case Match_InvalidSVEPredicateListMul2x16:
6651   case Match_InvalidSVEPredicateListMul2x32:
6652   case Match_InvalidSVEPredicateListMul2x64:
6653   case Match_InvalidSVEExactFPImmOperandHalfOne:
6654   case Match_InvalidSVEExactFPImmOperandHalfTwo:
6655   case Match_InvalidSVEExactFPImmOperandZeroOne:
6656   case Match_InvalidMatrixTile32:
6657   case Match_InvalidMatrixTile64:
6658   case Match_InvalidMatrix:
6659   case Match_InvalidMatrix8:
6660   case Match_InvalidMatrix16:
6661   case Match_InvalidMatrix32:
6662   case Match_InvalidMatrix64:
6663   case Match_InvalidMatrixTileVectorH8:
6664   case Match_InvalidMatrixTileVectorH16:
6665   case Match_InvalidMatrixTileVectorH32:
6666   case Match_InvalidMatrixTileVectorH64:
6667   case Match_InvalidMatrixTileVectorH128:
6668   case Match_InvalidMatrixTileVectorV8:
6669   case Match_InvalidMatrixTileVectorV16:
6670   case Match_InvalidMatrixTileVectorV32:
6671   case Match_InvalidMatrixTileVectorV64:
6672   case Match_InvalidMatrixTileVectorV128:
6673   case Match_InvalidSVCR:
6674   case Match_InvalidMatrixIndexGPR32_12_15:
6675   case Match_InvalidMatrixIndexGPR32_8_11:
6676   case Match_InvalidLookupTable:
6677   case Match_InvalidSVEVectorListMul2x8:
6678   case Match_InvalidSVEVectorListMul2x16:
6679   case Match_InvalidSVEVectorListMul2x32:
6680   case Match_InvalidSVEVectorListMul2x64:
6681   case Match_InvalidSVEVectorListMul4x8:
6682   case Match_InvalidSVEVectorListMul4x16:
6683   case Match_InvalidSVEVectorListMul4x32:
6684   case Match_InvalidSVEVectorListMul4x64:
6685   case Match_InvalidSVEVectorListStrided2x8:
6686   case Match_InvalidSVEVectorListStrided2x16:
6687   case Match_InvalidSVEVectorListStrided2x32:
6688   case Match_InvalidSVEVectorListStrided2x64:
6689   case Match_InvalidSVEVectorListStrided4x8:
6690   case Match_InvalidSVEVectorListStrided4x16:
6691   case Match_InvalidSVEVectorListStrided4x32:
6692   case Match_InvalidSVEVectorListStrided4x64:
6693   case Match_MSR:
6694   case Match_MRS: {
6695     if (ErrorInfo >= Operands.size())
6696       return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6697     // Any time we get here, there's nothing fancy to do. Just get the
6698     // operand SMLoc and display the diagnostic.
6699     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6700     if (ErrorLoc == SMLoc())
6701       ErrorLoc = IDLoc;
6702     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6703   }
6704   }
6705 
6706   llvm_unreachable("Implement any new match types added!");
6707 }
6708 
/// ParseDirective parses the AArch64 specific directives.
/// Returns false when the directive was recognised and consumed here, or
/// true to hand it back to the generic directive parser. The individual
/// parseDirective* helpers' return values are ignored at this level —
/// NOTE(review): presumably each helper emits its own diagnostics; confirm
/// against the helper implementations.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // Some directives below are only valid for a particular object format.
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive names are matched case-insensitively.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  // Mach-O only: linker optimization hint directives.
  else if (IsMachO) {
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  // COFF (Windows) only: SEH unwind-info directives.
  } else if (IsCOFF) {
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_ec_context")
      parseDirectiveSEHECContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The save_any_reg variants encode (paired, writeback) as two bools.
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else
      return true;
  } else
    return true;
  return false;
}
6811 
6812 static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
6813                             SmallVector<StringRef, 4> &RequestedExtensions) {
6814   const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
6815   const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
6816 
6817   if (!NoCrypto && Crypto) {
6818     // Map 'generic' (and others) to sha2 and aes, because
6819     // that was the traditional meaning of crypto.
6820     if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6821         ArchInfo == AArch64::ARMV8_3A) {
6822       RequestedExtensions.push_back("sha2");
6823       RequestedExtensions.push_back("aes");
6824     }
6825     if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6826         ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6827         ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6828         ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6829         ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6830         ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
6831       RequestedExtensions.push_back("sm4");
6832       RequestedExtensions.push_back("sha3");
6833       RequestedExtensions.push_back("sha2");
6834       RequestedExtensions.push_back("aes");
6835     }
6836   } else if (NoCrypto) {
6837     // Map 'generic' (and others) to sha2 and aes, because
6838     // that was the traditional meaning of crypto.
6839     if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6840         ArchInfo == AArch64::ARMV8_3A) {
6841       RequestedExtensions.push_back("nosha2");
6842       RequestedExtensions.push_back("noaes");
6843     }
6844     if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6845         ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6846         ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6847         ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6848         ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6849         ArchInfo == AArch64::ARMV9_4A) {
6850       RequestedExtensions.push_back("nosm4");
6851       RequestedExtensions.push_back("nosha3");
6852       RequestedExtensions.push_back("nosha2");
6853       RequestedExtensions.push_back("noaes");
6854     }
6855   }
6856 }
6857 
6858 /// parseDirectiveArch
6859 ///   ::= .arch token
6860 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
6861   SMLoc ArchLoc = getLoc();
6862 
6863   StringRef Arch, ExtensionString;
6864   std::tie(Arch, ExtensionString) =
6865       getParser().parseStringToEndOfStatement().trim().split('+');
6866 
6867   std::optional<AArch64::ArchInfo> ArchInfo = AArch64::parseArch(Arch);
6868   if (!ArchInfo)
6869     return Error(ArchLoc, "unknown arch name");
6870 
6871   if (parseToken(AsmToken::EndOfStatement))
6872     return true;
6873 
6874   // Get the architecture and extension features.
6875   std::vector<StringRef> AArch64Features;
6876   AArch64Features.push_back(ArchInfo->ArchFeature);
6877   AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
6878 
6879   MCSubtargetInfo &STI = copySTI();
6880   std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
6881   STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
6882                          join(ArchFeatures.begin(), ArchFeatures.end(), ","));
6883 
6884   SmallVector<StringRef, 4> RequestedExtensions;
6885   if (!ExtensionString.empty())
6886     ExtensionString.split(RequestedExtensions, '+');
6887 
6888   ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
6889 
6890   FeatureBitset Features = STI.getFeatureBits();
6891   setAvailableFeatures(ComputeAvailableFeatures(Features));
6892   for (auto Name : RequestedExtensions) {
6893     bool EnableFeature = true;
6894 
6895     if (Name.starts_with_insensitive("no")) {
6896       EnableFeature = false;
6897       Name = Name.substr(2);
6898     }
6899 
6900     for (const auto &Extension : ExtensionMap) {
6901       if (Extension.Name != Name)
6902         continue;
6903 
6904       if (Extension.Features.none())
6905         report_fatal_error("unsupported architectural extension: " + Name);
6906 
6907       FeatureBitset ToggleFeatures =
6908           EnableFeature
6909               ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
6910               : STI.ToggleFeature(Features & Extension.Features);
6911       setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6912       break;
6913     }
6914   }
6915   return false;
6916 }
6917 
6918 /// parseDirectiveArchExtension
6919 ///   ::= .arch_extension [no]feature
6920 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
6921   SMLoc ExtLoc = getLoc();
6922 
6923   StringRef Name = getParser().parseStringToEndOfStatement().trim();
6924 
6925   if (parseEOL())
6926     return true;
6927 
6928   bool EnableFeature = true;
6929   if (Name.starts_with_insensitive("no")) {
6930     EnableFeature = false;
6931     Name = Name.substr(2);
6932   }
6933 
6934   MCSubtargetInfo &STI = copySTI();
6935   FeatureBitset Features = STI.getFeatureBits();
6936   for (const auto &Extension : ExtensionMap) {
6937     if (Extension.Name != Name)
6938       continue;
6939 
6940     if (Extension.Features.none())
6941       return Error(ExtLoc, "unsupported architectural extension: " + Name);
6942 
6943     FeatureBitset ToggleFeatures =
6944         EnableFeature
6945             ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
6946             : STI.ToggleFeature(Features & Extension.Features);
6947     setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6948     return false;
6949   }
6950 
6951   return Error(ExtLoc, "unknown architectural extension: " + Name);
6952 }
6953 
6954 static SMLoc incrementLoc(SMLoc L, int Offset) {
6955   return SMLoc::getFromPointer(L.getPointer() + Offset);
6956 }
6957 
6958 /// parseDirectiveCPU
6959 ///   ::= .cpu id
6960 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
6961   SMLoc CurLoc = getLoc();
6962 
6963   StringRef CPU, ExtensionString;
6964   std::tie(CPU, ExtensionString) =
6965       getParser().parseStringToEndOfStatement().trim().split('+');
6966 
6967   if (parseToken(AsmToken::EndOfStatement))
6968     return true;
6969 
6970   SmallVector<StringRef, 4> RequestedExtensions;
6971   if (!ExtensionString.empty())
6972     ExtensionString.split(RequestedExtensions, '+');
6973 
6974   const std::optional<llvm::AArch64::ArchInfo> CpuArch = llvm::AArch64::getArchForCpu(CPU);
6975   if (!CpuArch) {
6976     Error(CurLoc, "unknown CPU name");
6977     return false;
6978   }
6979   ExpandCryptoAEK(*CpuArch, RequestedExtensions);
6980 
6981   MCSubtargetInfo &STI = copySTI();
6982   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
6983   CurLoc = incrementLoc(CurLoc, CPU.size());
6984 
6985   for (auto Name : RequestedExtensions) {
6986     // Advance source location past '+'.
6987     CurLoc = incrementLoc(CurLoc, 1);
6988 
6989     bool EnableFeature = true;
6990 
6991     if (Name.starts_with_insensitive("no")) {
6992       EnableFeature = false;
6993       Name = Name.substr(2);
6994     }
6995 
6996     bool FoundExtension = false;
6997     for (const auto &Extension : ExtensionMap) {
6998       if (Extension.Name != Name)
6999         continue;
7000 
7001       if (Extension.Features.none())
7002         report_fatal_error("unsupported architectural extension: " + Name);
7003 
7004       FeatureBitset Features = STI.getFeatureBits();
7005       FeatureBitset ToggleFeatures =
7006           EnableFeature
7007               ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
7008               : STI.ToggleFeature(Features & Extension.Features);
7009       setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
7010       FoundExtension = true;
7011 
7012       break;
7013     }
7014 
7015     if (!FoundExtension)
7016       Error(CurLoc, "unsupported architectural extension");
7017 
7018     CurLoc = incrementLoc(CurLoc, Name.size());
7019   }
7020   return false;
7021 }
7022 
7023 /// parseDirectiveInst
7024 ///  ::= .inst opcode [, ...]
7025 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7026   if (getLexer().is(AsmToken::EndOfStatement))
7027     return Error(Loc, "expected expression following '.inst' directive");
7028 
7029   auto parseOp = [&]() -> bool {
7030     SMLoc L = getLoc();
7031     const MCExpr *Expr = nullptr;
7032     if (check(getParser().parseExpression(Expr), L, "expected expression"))
7033       return true;
7034     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7035     if (check(!Value, L, "expected constant expression"))
7036       return true;
7037     getTargetStreamer().emitInst(Value->getValue());
7038     return false;
7039   };
7040 
7041   return parseMany(parseOp);
7042 }
7043 
7044 // parseDirectiveTLSDescCall:
7045 //   ::= .tlsdesccall symbol
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
      parseToken(AsmToken::EndOfStatement))
    return true;

  // Wrap the symbol in a VK_TLSDESC reference so the appropriate TLS
  // descriptor relocation is produced.
  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());

  // Emit the TLSDESCCALL marker pseudo-instruction at the current position.
  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::createExpr(Expr));

  getParser().getStreamer().emitInstruction(Inst, getSTI());
  return false;
}
7063 
7064 /// ::= .loh <lohName | lohId> label1, ..., labelN
7065 /// The number of arguments depends on the loh identifier.
7066 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7067   MCLOHType Kind;
7068   if (getTok().isNot(AsmToken::Identifier)) {
7069     if (getTok().isNot(AsmToken::Integer))
7070       return TokError("expected an identifier or a number in directive");
7071     // We successfully get a numeric value for the identifier.
7072     // Check if it is valid.
7073     int64_t Id = getTok().getIntVal();
7074     if (Id <= -1U && !isValidMCLOHType(Id))
7075       return TokError("invalid numeric identifier in directive");
7076     Kind = (MCLOHType)Id;
7077   } else {
7078     StringRef Name = getTok().getIdentifier();
7079     // We successfully parse an identifier.
7080     // Check if it is a recognized one.
7081     int Id = MCLOHNameToId(Name);
7082 
7083     if (Id == -1)
7084       return TokError("invalid identifier in directive");
7085     Kind = (MCLOHType)Id;
7086   }
7087   // Consume the identifier.
7088   Lex();
7089   // Get the number of arguments of this LOH.
7090   int NbArgs = MCLOHIdToNbArgs(Kind);
7091 
7092   assert(NbArgs != -1 && "Invalid number of arguments");
7093 
7094   SmallVector<MCSymbol *, 3> Args;
7095   for (int Idx = 0; Idx < NbArgs; ++Idx) {
7096     StringRef Name;
7097     if (getParser().parseIdentifier(Name))
7098       return TokError("expected identifier in directive");
7099     Args.push_back(getContext().getOrCreateSymbol(Name));
7100 
7101     if (Idx + 1 == NbArgs)
7102       break;
7103     if (parseComma())
7104       return true;
7105   }
7106   if (parseEOL())
7107     return true;
7108 
7109   getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7110   return false;
7111 }
7112 
7113 /// parseDirectiveLtorg
7114 ///  ::= .ltorg | .pool
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  // Nothing else is allowed on the line.
  if (parseEOL())
    return true;
  // Flush the pending constant pool at the current position.
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
7121 
7122 /// parseDirectiveReq
7123 ///  ::= name .req registername
7124 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7125   Lex(); // Eat the '.req' token.
7126   SMLoc SRegLoc = getLoc();
7127   RegKind RegisterKind = RegKind::Scalar;
7128   MCRegister RegNum;
7129   ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7130 
7131   if (!ParseRes.isSuccess()) {
7132     StringRef Kind;
7133     RegisterKind = RegKind::NeonVector;
7134     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7135 
7136     if (ParseRes.isFailure())
7137       return true;
7138 
7139     if (ParseRes.isSuccess() && !Kind.empty())
7140       return Error(SRegLoc, "vector register without type specifier expected");
7141   }
7142 
7143   if (!ParseRes.isSuccess()) {
7144     StringRef Kind;
7145     RegisterKind = RegKind::SVEDataVector;
7146     ParseRes =
7147         tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7148 
7149     if (ParseRes.isFailure())
7150       return true;
7151 
7152     if (ParseRes.isSuccess() && !Kind.empty())
7153       return Error(SRegLoc,
7154                    "sve vector register without type specifier expected");
7155   }
7156 
7157   if (!ParseRes.isSuccess()) {
7158     StringRef Kind;
7159     RegisterKind = RegKind::SVEPredicateVector;
7160     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7161 
7162     if (ParseRes.isFailure())
7163       return true;
7164 
7165     if (ParseRes.isSuccess() && !Kind.empty())
7166       return Error(SRegLoc,
7167                    "sve predicate register without type specifier expected");
7168   }
7169 
7170   if (!ParseRes.isSuccess())
7171     return Error(SRegLoc, "register name or alias expected");
7172 
7173   // Shouldn't be anything else.
7174   if (parseEOL())
7175     return true;
7176 
7177   auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7178   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7179     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7180 
7181   return false;
7182 }
7183 
7184 /// parseDirectiveUneq
7185 ///  ::= .unreq registername
7186 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7187   if (getTok().isNot(AsmToken::Identifier))
7188     return TokError("unexpected input in .unreq directive.");
7189   RegisterReqs.erase(getTok().getIdentifier().lower());
7190   Lex(); // Eat the identifier.
7191   return parseToken(AsmToken::EndOfStatement);
7192 }
7193 
// Handle .cfi_negate_ra_state: nothing else may follow on the line; forward
// to the streamer's CFI emission.
bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
  if (parseEOL())
    return true;
  getStreamer().emitCFINegateRAState();
  return false;
}
7200 
7201 /// parseDirectiveCFIBKeyFrame
7202 /// ::= .cfi_b_key
bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
  // No operands allowed; forward to the streamer's CFI emission.
  if (parseEOL())
    return true;
  getStreamer().emitCFIBKeyFrame();
  return false;
}
7209 
7210 /// parseDirectiveCFIMTETaggedFrame
7211 /// ::= .cfi_mte_tagged_frame
bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
  // No operands allowed; forward to the streamer's CFI emission.
  if (parseEOL())
    return true;
  getStreamer().emitCFIMTETaggedFrame();
  return false;
}
7218 
7219 /// parseDirectiveVariantPCS
7220 /// ::= .variant_pcs symbolname
7221 bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7222   StringRef Name;
7223   if (getParser().parseIdentifier(Name))
7224     return TokError("expected symbol name");
7225   if (parseEOL())
7226     return true;
7227   getTargetStreamer().emitDirectiveVariantPCS(
7228       getContext().getOrCreateSymbol(Name));
7229   return false;
7230 }
7231 
7232 /// parseDirectiveSEHAllocStack
7233 /// ::= .seh_stackalloc
7234 bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7235   int64_t Size;
7236   if (parseImmExpr(Size))
7237     return true;
7238   getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7239   return false;
7240 }
7241 
7242 /// parseDirectiveSEHPrologEnd
7243 /// ::= .seh_endprologue
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  // No operands; marks the end of the prologue for WinEH unwind info.
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7248 
7249 /// parseDirectiveSEHSaveR19R20X
7250 /// ::= .seh_save_r19r20_x
bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
  // Single immediate operand: the pre-decrement stack offset.
  int64_t Offset;
  if (parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
  return false;
}
7258 
7259 /// parseDirectiveSEHSaveFPLR
7260 /// ::= .seh_save_fplr
bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
  // Single immediate operand: the stack offset of the fp/lr pair.
  int64_t Offset;
  if (parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
  return false;
}
7268 
7269 /// parseDirectiveSEHSaveFPLRX
7270 /// ::= .seh_save_fplr_x
bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
  // Single immediate operand: the pre-decrement offset of the fp/lr pair.
  int64_t Offset;
  if (parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
  return false;
}
7278 
7279 /// parseDirectiveSEHSaveReg
7280 /// ::= .seh_save_reg
bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Syntax: <xreg>, <imm>; register constraints enforced by
  // parseRegisterInRange.
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
  return false;
}
7290 
7291 /// parseDirectiveSEHSaveRegX
7292 /// ::= .seh_save_reg_x
bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Syntax: <xreg>, <imm>; register constraints enforced by
  // parseRegisterInRange.
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
  return false;
}
7302 
7303 /// parseDirectiveSEHSaveRegP
7304 /// ::= .seh_save_regp
bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Syntax: <first xreg of pair>, <imm>; register constraints enforced by
  // parseRegisterInRange.
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
  return false;
}
7314 
7315 /// parseDirectiveSEHSaveRegPX
7316 /// ::= .seh_save_regp_x
bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Syntax: <first xreg of pair>, <imm>; register constraints enforced by
  // parseRegisterInRange.
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
  return false;
}
7326 
7327 /// parseDirectiveSEHSaveLRPair
7328 /// ::= .seh_save_lrpair
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Remember where the register operand starts for diagnostics below.
  L = getLoc();
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  // Only registers an even distance from x19 may start an lr pair.
  if (check(((Reg - 19) % 2 != 0), L,
            "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
7342 
7343 /// parseDirectiveSEHSaveFReg
7344 /// ::= .seh_save_freg
bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Syntax: <dreg>, <imm>; register constraints enforced by
  // parseRegisterInRange.
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
  return false;
}
7354 
7355 /// parseDirectiveSEHSaveFRegX
7356 /// ::= .seh_save_freg_x
bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Syntax: <dreg>, <imm>; register constraints enforced by
  // parseRegisterInRange.
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
  return false;
}
7366 
7367 /// parseDirectiveSEHSaveFRegP
7368 /// ::= .seh_save_fregp
bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Syntax: <first dreg of pair>, <imm>; register constraints enforced by
  // parseRegisterInRange.
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
  return false;
}
7378 
7379 /// parseDirectiveSEHSaveFRegPX
7380 /// ::= .seh_save_fregp_x
bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Syntax: <first dreg of pair>, <imm>; register constraints enforced by
  // parseRegisterInRange.
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
  return false;
}
7390 
7391 /// parseDirectiveSEHSetFP
7392 /// ::= .seh_set_fp
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7397 
7398 /// parseDirectiveSEHAddFP
7399 /// ::= .seh_add_fp
7400 bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7401   int64_t Size;
7402   if (parseImmExpr(Size))
7403     return true;
7404   getTargetStreamer().emitARM64WinCFIAddFP(Size);
7405   return false;
7406 }
7407 
7408 /// parseDirectiveSEHNop
7409 /// ::= .seh_nop
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7414 
7415 /// parseDirectiveSEHSaveNext
7416 /// ::= .seh_save_next
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7421 
7422 /// parseDirectiveSEHEpilogStart
7423 /// ::= .seh_startepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7428 
7429 /// parseDirectiveSEHEpilogEnd
7430 /// ::= .seh_endepilogue
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
7435 
7436 /// parseDirectiveSEHTrapFrame
7437 /// ::= .seh_trap_frame
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
7442 
7443 /// parseDirectiveSEHMachineFrame
7444 /// ::= .seh_pushframe
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
7449 
7450 /// parseDirectiveSEHContext
7451 /// ::= .seh_context
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
7456 
7457 /// parseDirectiveSEHECContext
7458 /// ::= .seh_ec_context
bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}
7463 
7464 /// parseDirectiveSEHClearUnwoundToCall
7465 /// ::= .seh_clear_unwound_to_call
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
7470 
7471 /// parseDirectiveSEHPACSignLR
7472 /// ::= .seh_pac_sign_lr
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  // No operands; forwards directly to the WinEH streamer.
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
7477 
7478 /// parseDirectiveSEHSaveAnyReg
7479 /// ::= .seh_save_any_reg
7480 /// ::= .seh_save_any_reg_p
7481 /// ::= .seh_save_any_reg_x
7482 /// ::= .seh_save_any_reg_px
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  // Syntax: <reg>, <offset>. The register class (x/d/q) selects which
  // save_any_reg unwind opcode family is emitted below.
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // General-purpose registers: offset must be non-negative and a multiple
    // of the slot size (16 for paired/writeback forms, 8 otherwise).
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    // fp and lr map onto encodings 29 and 30 after the x0..x28 range.
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // lr has no following register to pair with.
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit floating-point/SIMD registers.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      // d31 has no following register to pair with.
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector registers: offset is always 16-byte aligned.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      // q31 has no following register to pair with.
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}
7555 
bool AArch64AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  // Try @AUTH expressions: they're more complex than the usual symbol variants.
  // parseAuthExpr returns false on success; on failure, fall back to the
  // generic primary-expression parser.
  if (!parseAuthExpr(Res, EndLoc))
    return false;
  return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
}
7562 
7563 ///  parseAuthExpr
7564 ///  ::= _sym@AUTH(ib,123[,addr])
7565 ///  ::= (_sym + 5)@AUTH(ib,123[,addr])
7566 ///  ::= (_sym - 5)@AUTH(ib,123[,addr])
bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  MCContext &Ctx = getContext();

  AsmToken Tok = Parser.getTok();

  // Look for '_sym@AUTH' ...
  if (Tok.is(AsmToken::Identifier) && Tok.getIdentifier().ends_with("@AUTH")) {
    StringRef SymName = Tok.getIdentifier().drop_back(strlen("@AUTH"));
    // '@AUTH' cannot be combined with any other @-modifier on the symbol.
    if (SymName.contains('@'))
      return TokError(
          "combination of @AUTH with other modifiers not supported");
    Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);

    Parser.Lex(); // Eat the identifier.
  } else {
    // ... or look for a more complex symbol reference, such as ...
    SmallVector<AsmToken, 6> Tokens;

    // ... '"_long sym"@AUTH' ...
    if (Tok.is(AsmToken::String))
      Tokens.resize(2);
    // ... or '(_sym + 5)@AUTH'.
    else if (Tok.is(AsmToken::LParen))
      Tokens.resize(6);
    else
      return true;

    // Returning true without consuming tokens lets the caller fall back to
    // the generic primary-expression parser.
    if (Parser.getLexer().peekTokens(Tokens) != Tokens.size())
      return true;

    // In either case, the expression ends with '@' 'AUTH'.
    if (Tokens[Tokens.size() - 2].isNot(AsmToken::At) ||
        Tokens[Tokens.size() - 1].isNot(AsmToken::Identifier) ||
        Tokens[Tokens.size() - 1].getIdentifier() != "AUTH")
      return true;

    if (Tok.is(AsmToken::String)) {
      StringRef SymName;
      if (Parser.parseIdentifier(SymName))
        return true;
      Res = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(SymName), Ctx);
    } else {
      if (Parser.parsePrimaryExpr(Res, EndLoc, nullptr))
        return true;
    }

    Parser.Lex(); // '@'
    Parser.Lex(); // 'AUTH'
  }

  // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
  if (parseToken(AsmToken::LParen, "expected '('"))
    return true;

  if (Parser.getTok().isNot(AsmToken::Identifier))
    return TokError("expected key name");

  StringRef KeyStr = Parser.getTok().getIdentifier();
  auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
  if (!KeyIDOrNone)
    return TokError("invalid key '" + KeyStr + "'");
  Parser.Lex();

  if (parseToken(AsmToken::Comma, "expected ','"))
    return true;

  if (Parser.getTok().isNot(AsmToken::Integer))
    return TokError("expected integer discriminator");
  int64_t Discriminator = Parser.getTok().getIntVal();

  // Only 16-bit discriminators are encodable.
  if (!isUInt<16>(Discriminator))
    return TokError("integer discriminator " + Twine(Discriminator) +
                    " out of range [0, 0xFFFF]");
  Parser.Lex();

  // Optional third argument: 'addr' requests address diversity.
  bool UseAddressDiversity = false;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::Identifier) ||
        Parser.getTok().getIdentifier() != "addr")
      return TokError("expected 'addr'");
    UseAddressDiversity = true;
    Parser.Lex();
  }

  EndLoc = Parser.getTok().getEndLoc();
  if (parseToken(AsmToken::RParen, "expected ')'"))
    return true;

  // Wrap the parsed sub-expression into the signed AUTH expression node.
  Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
                                  UseAddressDiversity, Ctx);
  return false;
}
7661 
// Decompose Expr into an (ELF reference kind, Darwin reference kind, addend)
// triple. Returns true when the expression is a classifiable symbol
// reference; on success at most one of the two syntaxes is in use.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an AArch64-specific wrapper expression, if present.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
7703 
7704 /// Force static initialization.
7705 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
7706   RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
7707   RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
7708   RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
7709   RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
7710   RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
7711 }
7712 
7713 #define GET_REGISTER_MATCHER
7714 #define GET_SUBTARGET_FEATURE_NAME
7715 #define GET_MATCHER_IMPLEMENTATION
7716 #define GET_MNEMONIC_SPELL_CHECKER
7717 #include "AArch64GenAsmMatcher.inc"
7718 
7719 // Define this matcher function after the auto-generated include so we
7720 // have the match class enum definitions.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // Succeeds only when Op is a constant immediate equal to ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual("za"))
      return Match_Success;
    return Match_InvalidOperand;

    // If the kind is a token for a literal immediate, check if our asm operand
    // matches. This is for InstAliases which have a fixed-value immediate in
    // the asm string, such as hints which are parsed into a specific
    // instruction definition.
#define MATCH_HASH(N)                                                          \
  case MCK__HASH_##N:                                                          \
    return MatchesOpImmediate(N);
    MATCH_HASH(0)
    MATCH_HASH(1)
    MATCH_HASH(2)
    MATCH_HASH(3)
    MATCH_HASH(4)
    MATCH_HASH(6)
    MATCH_HASH(7)
    MATCH_HASH(8)
    MATCH_HASH(10)
    MATCH_HASH(12)
    MATCH_HASH(14)
    MATCH_HASH(16)
    MATCH_HASH(24)
    MATCH_HASH(25)
    MATCH_HASH(26)
    MATCH_HASH(27)
    MATCH_HASH(28)
    MATCH_HASH(29)
    MATCH_HASH(30)
    MATCH_HASH(31)
    MATCH_HASH(32)
    MATCH_HASH(40)
    MATCH_HASH(48)
    MATCH_HASH(64)
#undef MATCH_HASH
    // Same idea for fixed negative immediates spelled "#-N" in asm strings.
#define MATCH_HASH_MINUS(N)                                                    \
  case MCK__HASH__MINUS_##N:                                                   \
    return MatchesOpImmediate(-N);
    MATCH_HASH_MINUS(4)
    MATCH_HASH_MINUS(8)
    MATCH_HASH_MINUS(16)
#undef MATCH_HASH_MINUS
  }
}
7789 
7790 ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
7791 
7792   SMLoc S = getLoc();
7793 
7794   if (getTok().isNot(AsmToken::Identifier))
7795     return Error(S, "expected register");
7796 
7797   MCRegister FirstReg;
7798   ParseStatus Res = tryParseScalarRegister(FirstReg);
7799   if (!Res.isSuccess())
7800     return Error(S, "expected first even register of a consecutive same-size "
7801                     "even/odd register pair");
7802 
7803   const MCRegisterClass &WRegClass =
7804       AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
7805   const MCRegisterClass &XRegClass =
7806       AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
7807 
7808   bool isXReg = XRegClass.contains(FirstReg),
7809        isWReg = WRegClass.contains(FirstReg);
7810   if (!isXReg && !isWReg)
7811     return Error(S, "expected first even register of a consecutive same-size "
7812                     "even/odd register pair");
7813 
7814   const MCRegisterInfo *RI = getContext().getRegisterInfo();
7815   unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
7816 
7817   if (FirstEncoding & 0x1)
7818     return Error(S, "expected first even register of a consecutive same-size "
7819                     "even/odd register pair");
7820 
7821   if (getTok().isNot(AsmToken::Comma))
7822     return Error(getLoc(), "expected comma");
7823   // Eat the comma
7824   Lex();
7825 
7826   SMLoc E = getLoc();
7827   MCRegister SecondReg;
7828   Res = tryParseScalarRegister(SecondReg);
7829   if (!Res.isSuccess())
7830     return Error(E, "expected second odd register of a consecutive same-size "
7831                     "even/odd register pair");
7832 
7833   if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
7834       (isXReg && !XRegClass.contains(SecondReg)) ||
7835       (isWReg && !WRegClass.contains(SecondReg)))
7836     return Error(E, "expected second odd register of a consecutive same-size "
7837                     "even/odd register pair");
7838 
7839   unsigned Pair = 0;
7840   if (isXReg) {
7841     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
7842            &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
7843   } else {
7844     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
7845            &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
7846   }
7847 
7848   Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
7849       getLoc(), getContext()));
7850 
7851   return ParseStatus::Success;
7852 }
7853 
/// Parse an SVE data vector register operand, e.g. 'z3', 'z3.d', or
/// 'z3.d, lsl #3'. \p ParseSuffix requires an element-size suffix to be
/// present; \p ParseShiftExtend additionally accepts a trailing
/// shift/extend after a comma.
template <bool ParseShiftExtend, bool ParseSuffix>
ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  MCRegister RegNum;
  StringRef Kind;

  ParseStatus Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

  if (!Res.isSuccess())
    return Res;

  // When a suffix is mandatory, a bare register name is not a match.
  if (ParseSuffix && Kind.empty())
    return ParseStatus::NoMatch;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    // An optional vector index (e.g. 'z0.d[1]') may still follow the
    // register.  NOTE(review): this inner 'Res' shadows the outer one.
    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  // Eat the comma
  Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  // Fold the parsed shift/extend into the vector-register operand itself
  // rather than emitting it as a separate operand.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}
7904 
7905 ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
7906   MCAsmParser &Parser = getParser();
7907 
7908   SMLoc SS = getLoc();
7909   const AsmToken &TokE = getTok();
7910   bool IsHash = TokE.is(AsmToken::Hash);
7911 
7912   if (!IsHash && TokE.isNot(AsmToken::Identifier))
7913     return ParseStatus::NoMatch;
7914 
7915   int64_t Pattern;
7916   if (IsHash) {
7917     Lex(); // Eat hash
7918 
7919     // Parse the immediate operand.
7920     const MCExpr *ImmVal;
7921     SS = getLoc();
7922     if (Parser.parseExpression(ImmVal))
7923       return ParseStatus::Failure;
7924 
7925     auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
7926     if (!MCE)
7927       return ParseStatus::Failure;
7928 
7929     Pattern = MCE->getValue();
7930   } else {
7931     // Parse the pattern
7932     auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
7933     if (!Pat)
7934       return ParseStatus::NoMatch;
7935 
7936     Lex();
7937     Pattern = Pat->Encoding;
7938     assert(Pattern >= 0 && Pattern < 32);
7939   }
7940 
7941   Operands.push_back(
7942       AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
7943                                 SS, getLoc(), getContext()));
7944 
7945   return ParseStatus::Success;
7946 }
7947 
7948 ParseStatus
7949 AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
7950   int64_t Pattern;
7951   SMLoc SS = getLoc();
7952   const AsmToken &TokE = getTok();
7953   // Parse the pattern
7954   auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
7955       TokE.getString());
7956   if (!Pat)
7957     return ParseStatus::NoMatch;
7958 
7959   Lex();
7960   Pattern = Pat->Encoding;
7961   assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
7962 
7963   Operands.push_back(
7964       AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
7965                                 SS, getLoc(), getContext()));
7966 
7967   return ParseStatus::Success;
7968 }
7969 
7970 ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
7971   SMLoc SS = getLoc();
7972 
7973   MCRegister XReg;
7974   if (!tryParseScalarRegister(XReg).isSuccess())
7975     return ParseStatus::NoMatch;
7976 
7977   MCContext &ctx = getContext();
7978   const MCRegisterInfo *RI = ctx.getRegisterInfo();
7979   int X8Reg = RI->getMatchingSuperReg(
7980       XReg, AArch64::x8sub_0,
7981       &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
7982   if (!X8Reg)
7983     return Error(SS,
7984                  "expected an even-numbered x-register in the range [x0,x22]");
7985 
7986   Operands.push_back(
7987       AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
7988   return ParseStatus::Success;
7989 }
7990 
7991 ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
7992   SMLoc S = getLoc();
7993 
7994   if (getTok().isNot(AsmToken::Integer))
7995     return ParseStatus::NoMatch;
7996 
7997   if (getLexer().peekTok().isNot(AsmToken::Colon))
7998     return ParseStatus::NoMatch;
7999 
8000   const MCExpr *ImmF;
8001   if (getParser().parseExpression(ImmF))
8002     return ParseStatus::NoMatch;
8003 
8004   if (getTok().isNot(AsmToken::Colon))
8005     return ParseStatus::NoMatch;
8006 
8007   Lex(); // Eat ':'
8008   if (getTok().isNot(AsmToken::Integer))
8009     return ParseStatus::NoMatch;
8010 
8011   SMLoc E = getTok().getLoc();
8012   const MCExpr *ImmL;
8013   if (getParser().parseExpression(ImmL))
8014     return ParseStatus::NoMatch;
8015 
8016   unsigned ImmFVal = dyn_cast<MCConstantExpr>(ImmF)->getValue();
8017   unsigned ImmLVal = dyn_cast<MCConstantExpr>(ImmL)->getValue();
8018 
8019   Operands.push_back(
8020       AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8021   return ParseStatus::Success;
8022 }
8023