1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "MCTargetDesc/AArch64AddressingModes.h"
10 #include "MCTargetDesc/AArch64InstPrinter.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "TargetInfo/AArch64TargetInfo.h"
15 #include "AArch64InstrInfo.h"
16 #include "Utils/AArch64BaseInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCLinkerOptimizationHint.h"
32 #include "llvm/MC/MCObjectFileInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
38 #include "llvm/MC/MCRegisterInfo.h"
39 #include "llvm/MC/MCStreamer.h"
40 #include "llvm/MC/MCSubtargetInfo.h"
41 #include "llvm/MC/MCSymbol.h"
42 #include "llvm/MC/MCTargetOptions.h"
43 #include "llvm/MC/SubtargetFeature.h"
44 #include "llvm/MC/MCValue.h"
45 #include "llvm/Support/Casting.h"
46 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/ErrorHandling.h"
48 #include "llvm/Support/MathExtras.h"
49 #include "llvm/Support/SMLoc.h"
50 #include "llvm/Support/TargetParser.h"
51 #include "llvm/Support/TargetRegistry.h"
52 #include "llvm/Support/raw_ostream.h"
53 #include <cassert>
54 #include <cctype>
55 #include <cstdint>
56 #include <cstdio>
57 #include <string>
58 #include <tuple>
59 #include <utility>
60 #include <vector>
61 
62 using namespace llvm;
63 
64 namespace {
65 
// The distinct register-name namespaces the parser can match a register
// token against (scalar, NEON vector, SVE data/predicate vector, matrix).
enum class RegKind {
  Scalar,
  NeonVector,
  SVEDataVector,
  SVEPredicateVector,
  Matrix
};

// The shapes a matrix operand can take: the whole array, a tile, or a
// single tile row/column.
enum class MatrixKind { Array, Tile, Row, Col };

// How a parsed register is allowed to relate to the register class an
// instruction expects: it must be the same register, or it may match via
// its super-register or sub-register (e.g. for GPR64as32 / GPR32as64).
enum RegConstraintEqualityTy {
  EqualsReg,
  EqualsSuperReg,
  EqualsSubReg
};
81 
82 class AArch64AsmParser : public MCTargetAsmParser {
83 private:
84   StringRef Mnemonic; ///< Instruction mnemonic.
85 
86   // Map of register aliases registers via the .req directive.
87   StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
88 
89   class PrefixInfo {
90   public:
91     static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
92       PrefixInfo Prefix;
93       switch (Inst.getOpcode()) {
94       case AArch64::MOVPRFX_ZZ:
95         Prefix.Active = true;
96         Prefix.Dst = Inst.getOperand(0).getReg();
97         break;
98       case AArch64::MOVPRFX_ZPmZ_B:
99       case AArch64::MOVPRFX_ZPmZ_H:
100       case AArch64::MOVPRFX_ZPmZ_S:
101       case AArch64::MOVPRFX_ZPmZ_D:
102         Prefix.Active = true;
103         Prefix.Predicated = true;
104         Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
105         assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
106                "No destructive element size set for movprfx");
107         Prefix.Dst = Inst.getOperand(0).getReg();
108         Prefix.Pg = Inst.getOperand(2).getReg();
109         break;
110       case AArch64::MOVPRFX_ZPzZ_B:
111       case AArch64::MOVPRFX_ZPzZ_H:
112       case AArch64::MOVPRFX_ZPzZ_S:
113       case AArch64::MOVPRFX_ZPzZ_D:
114         Prefix.Active = true;
115         Prefix.Predicated = true;
116         Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
117         assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
118                "No destructive element size set for movprfx");
119         Prefix.Dst = Inst.getOperand(0).getReg();
120         Prefix.Pg = Inst.getOperand(1).getReg();
121         break;
122       default:
123         break;
124       }
125 
126       return Prefix;
127     }
128 
129     PrefixInfo() : Active(false), Predicated(false) {}
130     bool isActive() const { return Active; }
131     bool isPredicated() const { return Predicated; }
132     unsigned getElementSize() const {
133       assert(Predicated);
134       return ElementSize;
135     }
136     unsigned getDstReg() const { return Dst; }
137     unsigned getPgReg() const {
138       assert(Predicated);
139       return Pg;
140     }
141 
142   private:
143     bool Active;
144     bool Predicated;
145     unsigned ElementSize;
146     unsigned Dst;
147     unsigned Pg;
148   } NextPrefix;
149 
150   AArch64TargetStreamer &getTargetStreamer() {
151     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
152     return static_cast<AArch64TargetStreamer &>(TS);
153   }
154 
155   SMLoc getLoc() const { return getParser().getTok().getLoc(); }
156 
157   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
158   void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
159   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
160   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
161   unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
162   bool parseRegister(OperandVector &Operands);
163   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
164   bool parseNeonVectorList(OperandVector &Operands);
165   bool parseOptionalMulOperand(OperandVector &Operands);
166   bool parseKeywordOperand(OperandVector &Operands);
167   bool parseOperand(OperandVector &Operands, bool isCondCode,
168                     bool invertCondCode);
169   bool parseImmExpr(int64_t &Out);
170   bool parseComma();
171   bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
172                             unsigned Last);
173 
174   bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
175                       OperandVector &Operands);
176 
177   bool parseDirectiveArch(SMLoc L);
178   bool parseDirectiveArchExtension(SMLoc L);
179   bool parseDirectiveCPU(SMLoc L);
180   bool parseDirectiveInst(SMLoc L);
181 
182   bool parseDirectiveTLSDescCall(SMLoc L);
183 
184   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
185   bool parseDirectiveLtorg(SMLoc L);
186 
187   bool parseDirectiveReq(StringRef Name, SMLoc L);
188   bool parseDirectiveUnreq(SMLoc L);
189   bool parseDirectiveCFINegateRAState();
190   bool parseDirectiveCFIBKeyFrame();
191 
192   bool parseDirectiveVariantPCS(SMLoc L);
193 
194   bool parseDirectiveSEHAllocStack(SMLoc L);
195   bool parseDirectiveSEHPrologEnd(SMLoc L);
196   bool parseDirectiveSEHSaveR19R20X(SMLoc L);
197   bool parseDirectiveSEHSaveFPLR(SMLoc L);
198   bool parseDirectiveSEHSaveFPLRX(SMLoc L);
199   bool parseDirectiveSEHSaveReg(SMLoc L);
200   bool parseDirectiveSEHSaveRegX(SMLoc L);
201   bool parseDirectiveSEHSaveRegP(SMLoc L);
202   bool parseDirectiveSEHSaveRegPX(SMLoc L);
203   bool parseDirectiveSEHSaveLRPair(SMLoc L);
204   bool parseDirectiveSEHSaveFReg(SMLoc L);
205   bool parseDirectiveSEHSaveFRegX(SMLoc L);
206   bool parseDirectiveSEHSaveFRegP(SMLoc L);
207   bool parseDirectiveSEHSaveFRegPX(SMLoc L);
208   bool parseDirectiveSEHSetFP(SMLoc L);
209   bool parseDirectiveSEHAddFP(SMLoc L);
210   bool parseDirectiveSEHNop(SMLoc L);
211   bool parseDirectiveSEHSaveNext(SMLoc L);
212   bool parseDirectiveSEHEpilogStart(SMLoc L);
213   bool parseDirectiveSEHEpilogEnd(SMLoc L);
214   bool parseDirectiveSEHTrapFrame(SMLoc L);
215   bool parseDirectiveSEHMachineFrame(SMLoc L);
216   bool parseDirectiveSEHContext(SMLoc L);
217   bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
218 
219   bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
220                            SmallVectorImpl<SMLoc> &Loc);
221   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
222                                OperandVector &Operands, MCStreamer &Out,
223                                uint64_t &ErrorInfo,
224                                bool MatchingInlineAsm) override;
225 /// @name Auto-generated Match Functions
226 /// {
227 
228 #define GET_ASSEMBLER_HEADER
229 #include "AArch64GenAsmMatcher.inc"
230 
231   /// }
232 
233   OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
234   OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
235                                               RegKind MatchKind);
236   OperandMatchResultTy tryParseMatrixRegister(OperandVector &Operands);
237   OperandMatchResultTy tryParseSVCR(OperandVector &Operands);
238   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
239   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
240   OperandMatchResultTy tryParseBarriernXSOperand(OperandVector &Operands);
241   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
242   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
243   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
244   template <bool IsSVEPrefetch = false>
245   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
246   OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
247   OperandMatchResultTy tryParseBTIHint(OperandVector &Operands);
248   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
249   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
250   template<bool AddFPZeroAsLiteral>
251   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
252   OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
253   OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
254   bool tryParseNeonVectorRegister(OperandVector &Operands);
255   OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
256   OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
257   template <bool ParseShiftExtend,
258             RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
259   OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
260   template <bool ParseShiftExtend, bool ParseSuffix>
261   OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
262   OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
263   template <RegKind VectorKind>
264   OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
265                                           bool ExpectMatch = false);
266   OperandMatchResultTy tryParseMatrixTileList(OperandVector &Operands);
267   OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
268   OperandMatchResultTy tryParseGPR64x8(OperandVector &Operands);
269 
270 public:
271   enum AArch64MatchResultTy {
272     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
273 #define GET_OPERAND_DIAGNOSTIC_TYPES
274 #include "AArch64GenAsmMatcher.inc"
275   };
276   bool IsILP32;
277 
278   AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
279                    const MCInstrInfo &MII, const MCTargetOptions &Options)
280     : MCTargetAsmParser(Options, STI, MII) {
281     IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
282     MCAsmParserExtension::Initialize(Parser);
283     MCStreamer &S = getParser().getStreamer();
284     if (S.getTargetStreamer() == nullptr)
285       new AArch64TargetStreamer(S);
286 
287     // Alias .hword/.word/.[dx]word to the target-independent
288     // .2byte/.4byte/.8byte directives as they have the same form and
289     // semantics:
290     ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
291     Parser.addAliasForDirective(".hword", ".2byte");
292     Parser.addAliasForDirective(".word", ".4byte");
293     Parser.addAliasForDirective(".dword", ".8byte");
294     Parser.addAliasForDirective(".xword", ".8byte");
295 
296     // Initialize the set of available features.
297     setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
298   }
299 
300   bool regsEqual(const MCParsedAsmOperand &Op1,
301                  const MCParsedAsmOperand &Op2) const override;
302   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
303                         SMLoc NameLoc, OperandVector &Operands) override;
304   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
305   OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
306                                         SMLoc &EndLoc) override;
307   bool ParseDirective(AsmToken DirectiveID) override;
308   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
309                                       unsigned Kind) override;
310 
311   static bool classifySymbolRef(const MCExpr *Expr,
312                                 AArch64MCExpr::VariantKind &ELFRefKind,
313                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
314                                 int64_t &Addend);
315 };
316 
317 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
318 /// instruction.
319 class AArch64Operand : public MCParsedAsmOperand {
320 private:
  // Discriminator selecting which member of the union below is active.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range of the tokens that formed this operand.
  SMLoc StartLoc, EndLoc;

  // A bare token (e.g. a mnemonic suffix or keyword), stored as a
  // pointer/length pair into the source buffer.
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  // A single matrix register (see MatrixKind for the possible shapes).
  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // A list of matrix tiles, encoded as a bitmask.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // A list of consecutive vector registers (e.g. "{ v0.4s, v1.4s }").
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind  RegisterKind;
  };

  // A lane index applied to a vector register.
  struct VectorIndexOp {
    int Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  // An immediate with an associated left-shift amount (e.g. "#1, lsl #12").
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // A named system register; the separate MRS/MSR encodings allow
  // registers that are only readable or only writable.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  // Storage for the operand payload; Kind above selects the active member.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
493 
  // Copy constructor: only the union member selected by Kind is valid in
  // the source, so copy exactly that member (copying the whole union
  // byte-wise would read inactive members).
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
555 
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Union accessors: each asserts that the active Kind matches before
  // reading the corresponding union member.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstructs the APFloat from the stored IEEE-double bit pattern.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }
705 
  // A shift/extend can be stored either as a standalone k_ShiftExtend
  // operand or embedded inside a k_Register operand (see RegOp::ShiftExtend);
  // the three accessors below handle both representations.
  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  // Whether the shift/extend amount was written explicitly in the source.
  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
729 
  bool isImm() const override { return Kind == k_Immediate; }
  // No AArch64Operand kind is ever classified as a memory operand.
  bool isMem() const override { return false; }
732 
733   bool isUImm6() const {
734     if (!isImm())
735       return false;
736     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737     if (!MCE)
738       return false;
739     int64_t Val = MCE->getValue();
740     return (Val >= 0 && Val < 64);
741   }
742 
  // Signed Width-bit immediate (unscaled).
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  // Signed immediate that is a multiple of Scale and whose scaled value
  // fits in Bits bits; see isImmScaled for the exact range computation.
  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  // Unsigned counterpart of isSImmScaled.
  template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
    return isImmScaled<Bits, Scale>(false);
  }
752 
753   template <int Bits, int Scale>
754   DiagnosticPredicate isImmScaled(bool Signed) const {
755     if (!isImm())
756       return DiagnosticPredicateTy::NoMatch;
757 
758     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
759     if (!MCE)
760       return DiagnosticPredicateTy::NoMatch;
761 
762     int64_t MinVal, MaxVal;
763     if (Signed) {
764       int64_t Shift = Bits - 1;
765       MinVal = (int64_t(1) << Shift) * -Scale;
766       MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
767     } else {
768       MinVal = 0;
769       MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
770     }
771 
772     int64_t Val = MCE->getValue();
773     if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
774       return DiagnosticPredicateTy::Match;
775 
776     return DiagnosticPredicateTy::NearMatch;
777   }
778 
779   DiagnosticPredicate isSVEPattern() const {
780     if (!isImm())
781       return DiagnosticPredicateTy::NoMatch;
782     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
783     if (!MCE)
784       return DiagnosticPredicateTy::NoMatch;
785     int64_t Val = MCE->getValue();
786     if (Val >= 0 && Val < 32)
787       return DiagnosticPredicateTy::Match;
788     return DiagnosticPredicateTy::NearMatch;
789   }
790 
791   bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
792     AArch64MCExpr::VariantKind ELFRefKind;
793     MCSymbolRefExpr::VariantKind DarwinRefKind;
794     int64_t Addend;
795     if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
796                                            Addend)) {
797       // If we don't understand the expression, assume the best and
798       // let the fixup and relocation code deal with it.
799       return true;
800     }
801 
802     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
803         ELFRefKind == AArch64MCExpr::VK_LO12 ||
804         ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
805         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
806         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
807         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
808         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
809         ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
810         ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
811         ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
812         ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
813         ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
814       // Note that we don't range-check the addend. It's adjusted modulo page
815       // size when converted, so there is no "out of range" condition when using
816       // @pageoff.
817       return true;
818     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
819                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
820       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
821       return Addend == 0;
822     }
823 
824     return false;
825   }
826 
827   template <int Scale> bool isUImm12Offset() const {
828     if (!isImm())
829       return false;
830 
831     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
832     if (!MCE)
833       return isSymbolicUImm12Offset(getImm());
834 
835     int64_t Val = MCE->getValue();
836     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
837   }
838 
839   template <int N, int M>
840   bool isImmInRange() const {
841     if (!isImm())
842       return false;
843     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
844     if (!MCE)
845       return false;
846     int64_t Val = MCE->getValue();
847     return (Val >= N && Val <= M);
848   }
849 
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask of the bits above the width of T. The shift is performed in two
    // halves (sizeof(T)*4 each) to avoid a left shift by 64, which would be
    // undefined behavior when T is 64 bits wide.
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the low sizeof(T)*8 bits against the architectural encoding.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
869 
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
    // Already-shifted constant with exactly the requested shift amount.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Plain constant: fold the shift in if the low Width bits are all zero
    // (and the value is non-zero); otherwise report it unshifted.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    // Non-constant expression, or a shifted immediate with the wrong shift.
    return {};
  }
892 
  // True iff this operand can be used as the immediate of an ADD/SUB
  // instruction: an optionally lsl #0/#12-shifted 12-bit constant, or a
  // symbolic expression with a suitable relocation modifier.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic expressions are accepted when they carry one of the
    // :lo12:/:hi12:-style modifiers (or Darwin page-offset kinds) below.
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
937 
  /// True when the operand is a negative constant whose magnitude fits an
  /// ADD/SUB imm12 (used to alias e.g. ADD with a negative imm to SUB).
  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
      return false;

    // Otherwise it should be a real negative immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

    return false;
  }
948 
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    // Byte elements never take the 'lsl #8' form.
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
969 
970   // Unsigned value in the range 0 to 255. For element widths of
971   // 16 bits or higher it may also be a signed multiple of 256 in the
972   // range 0 to 65280.
973   template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
974     if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
975       return DiagnosticPredicateTy::NoMatch;
976 
977     bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
978                   std::is_same<int8_t, T>::value;
979     if (auto ShiftedImm = getShiftedVal<8>())
980       if (!(IsByte && ShiftedImm->second) &&
981           AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
982                                         << ShiftedImm->second))
983         return DiagnosticPredicateTy::Match;
984 
985     return DiagnosticPredicateTy::NearMatch;
986   }
987 
  /// True when the value is encodable as a logical immediate but NOT as an
  /// SVE CPY immediate, so the logical-imm form should be preferred.
  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
993 
  /// True when this operand is a parsed condition code (eq, ne, ...).
  bool isCondCode() const { return Kind == k_CondCode; }
995 
  /// True when the constant is encodable as an AdvSIMD modified-immediate
  /// "type 10" (FMOV-style 64-bit pattern) value.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
1004 
  /// True when the operand can be a branch target with an N-bit signed,
  /// word-aligned displacement. Non-constant expressions are accepted and
  /// left to the fixup machinery.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch displacements are encoded in words; reject unaligned targets.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1018 
1019   bool
1020   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1021     if (!isImm())
1022       return false;
1023 
1024     AArch64MCExpr::VariantKind ELFRefKind;
1025     MCSymbolRefExpr::VariantKind DarwinRefKind;
1026     int64_t Addend;
1027     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1028                                              DarwinRefKind, Addend)) {
1029       return false;
1030     }
1031     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1032       return false;
1033 
1034     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
1035       if (ELFRefKind == AllowedModifiers[i])
1036         return true;
1037     }
1038 
1039     return false;
1040   }
1041 
  // The four MOVW "G" predicates below enumerate, per 16-bit chunk (G3 is
  // bits 63:48 down to G0 for bits 15:0), the relocation modifiers that may
  // legally select that chunk of a symbol's value.

  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1071 
  /// True when the immediate can be materialized by a MOVZ alias (MOV) with
  /// the given register width and hw shift.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCExpr *E = getImm();
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
      uint64_t Value = CE->getValue();

      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    }
    // Only supports the case of Shift being 0 if an expression is used as an
    // operand
    return !Shift && E;
  }
1086 
  /// True when the immediate can be materialized by a MOVN alias (MOV of an
  /// inverted value); unlike the MOVZ form, only constants qualify.
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
  }
1097 
  /// True when the FP constant is representable as an 8-bit encoded FMOV
  /// immediate (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }
1102 
  // A plain barrier operand excludes the FEAT_XS "nXS" qualified form, and
  // vice versa.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  /// True when this operand names a system register.
  bool isSysReg() const { return Kind == k_SysReg; }
1110 
  // MRS/MSR accept different (overlapping) sets of system registers; an
  // encoding of -1U marks the register as invalid for that direction.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }
1121 
  /// True for the PSTATE fields whose MSR immediate form takes a 1-bit value.
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::DIT ||
            SysReg.PStateField == AArch64PState::UAO ||
            SysReg.PStateField == AArch64PState::SSBS);
  }

  /// True for any other valid PSTATE field; these take a 4-bit immediate.
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
1134 
  /// True when this operand is a valid SME SVCR pstate field.
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1140 
  /// Generic register check required by MCParsedAsmOperand.
  bool isReg() const override {
    return Kind == k_Register;
  }

  /// True for scalar (GPR/FPR) registers as opposed to vector registers.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  /// True for NEON vector registers (V0-V31).
  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }
1152 
  /// True for NEON vector registers restricted to the lower half of the
  /// register file (V0-V15), as required by some indexed-element encodings.
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }
1160 
  // SME matrix (ZA) operand kinds.
  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1163 
1164   template <unsigned Class> bool isSVEVectorReg() const {
1165     RegKind RK;
1166     switch (Class) {
1167     case AArch64::ZPRRegClassID:
1168     case AArch64::ZPR_3bRegClassID:
1169     case AArch64::ZPR_4bRegClassID:
1170       RK = RegKind::SVEDataVector;
1171       break;
1172     case AArch64::PPRRegClassID:
1173     case AArch64::PPR_3bRegClassID:
1174       RK = RegKind::SVEPredicateVector;
1175       break;
1176     default:
1177       llvm_unreachable("Unsupport register class");
1178     }
1179 
1180     return (Kind == k_Register && Reg.Kind == RK) &&
1181            AArch64MCRegisterClasses[Class].contains(getReg());
1182   }
1183 
  /// True when a scalar FP register is written where a Z register is
  /// expected (the "FPR as ZPR" aliasing syntax).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1188 
  /// Match an SVE predicate register of the given class whose parsed element
  /// width suffix equals ElementWidth; wrong width gives a NearMatch so a
  /// targeted diagnostic can be emitted.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }

  /// Same as above but for SVE data (Z) registers.
  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
      return DiagnosticPredicateTy::NoMatch;

    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1210 
  /// Match an SVE data register combined with a shift/extend of the expected
  /// type and amount (used for scatter/gather addressing operands).
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1233 
  // Cross-width aliases: a 64-bit register written where the instruction
  // encodes its 32-bit half, and vice versa.
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  /// True for a register in the x8-aligned 8-register tuple class (LD64B et
  /// al.).
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Consecutive W/X register pairs used by CASP-style instructions.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
1261 
  /// Match a complex-number rotation immediate: a multiple-of-Angle value
  /// (offset by Remainder) no greater than 270 degrees.
  template<int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    if (!isImm()) return DiagnosticPredicateTy::NoMatch;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return DiagnosticPredicateTy::NoMatch;
    uint64_t Value = CE->getValue();

    if (Value % Angle == Remainder && Value <= 270)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1274 
  /// True when this is a scalar register of the given 64-bit GPR class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  /// Match a 64-bit GPR carrying an 'lsl #log2(ExtWidth/8)' shift, as used
  /// by scaled register offsets.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1290 
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }

  /// Match an explicitly-typed vector list: register kind, register count,
  /// element count, and element width must all agree.
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.RegisterKind != VectorKind)
      return false;
    if (VectorList.ElementWidth != ElementWidth)
      return false;
    return VectorList.NumElements == NumElements;
  }
1313 
  /// Match a vector lane index in the inclusive range [Min, Max].
  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
      return DiagnosticPredicateTy::NoMatch;
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1322 
  // Simple kind tests for the remaining operand categories.
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  /// True when the shift/extend operand is a pure shift (no register
  /// extension): LSL, LSR, ASR, ROR, or MSL.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
1342 
  /// Match an FP immediate against the single exact value identified by
  /// ImmEnum in the AArch64ExactFPImm table (e.g. #0.5 for some SVE insts).
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      // Require a bit-exact match; a merely-close value is a NearMatch.
      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1365 
  /// Match either of two exact FP immediates; on failure returns the
  /// diagnostic state of the second attempt.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1375 
  /// True for a register-extend operand (UXTB..SXTX, or LSL as the UXTX
  /// alias) with a shift amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }
1398 
  /// True for the extends that take a 64-bit source register (UXTX/SXTX/LSL)
  /// with a shift of at most 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1407 
  /// Extend valid for a memory operand with a 64-bit index register: LSL or
  /// SXTX, either unscaled or scaled by the access width.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  /// Extend valid for a memory operand with a 32-bit index register: UXTW or
  /// SXTW, either unscaled or scaled by the access width.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1425 
  /// Shifter valid for ADD/SUB-style instructions; the amount must be less
  /// than the operand register width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  /// Shifter valid for AND/ORR/EOR-style instructions; also allows ROR.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1448 
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16 (the comment previously listed
    // the 64-bit amounts; the code has always accepted only 0 and 16).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1460 
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 (the comment
    // previously listed the 32-bit amounts; the code accepts all four).
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1472 
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }
1482 
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // For half-word (16-bit) elements only shifts of 0 or 8 fit.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1492 
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL ("masking shift left") by 8 or 16, as
    // used by MOVI/MVNI.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1502 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1512 
  /// ADRP target: a page-aligned displacement within +/-4GiB (21 bits of
  /// pages). Non-constant expressions are assumed valid; parsing already
  /// validated them.
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  /// ADR target: a byte displacement within the signed 21-bit range.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
        return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1544 
  /// Match an SME matrix operand of the given kind, element size, and
  /// register class; any mismatch on those yields a NearMatch diagnostic.
  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (!isMatrix())
      return DiagnosticPredicateTy::NoMatch;
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1555 
1556   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1557     // Add as immediates when possible.  Null MCExpr = 0.
1558     if (!Expr)
1559       Inst.addOperand(MCOperand::createImm(0));
1560     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1561       Inst.addOperand(MCOperand::createImm(CE->getValue()));
1562     else
1563       Inst.addOperand(MCOperand::createExpr(Expr));
1564   }
1565 
  /// Append this operand's register to the instruction.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// Append this operand's SME matrix register to the instruction.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1575 
  /// Append the 32-bit sub-register (Wn) corresponding to the parsed 64-bit
  /// register (Xn); the mapping goes through the encoding value.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  /// Append the 64-bit super-register (Xn) corresponding to the parsed
  /// 32-bit register (Wn).
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1599 
  /// Append the Z register aliasing a scalar FP register of the given width:
  /// the register's index relative to its class base selects Zn.
  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    unsigned Base;
    switch (Width) {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;
    default:
      llvm_unreachable("Unsupported width");
    }
    Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
  }
1614 
  /// Append the D view of a parsed Q vector register.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  /// Append a Q vector register unchanged.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// Append a lower-half (V0-V15) vector register unchanged.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1633 
  /// Row selector into the FirstRegs table of addVectorListOperands: which
  /// register bank a vector list is encoded against.
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
  };
1639 
  /// Append a vector-list (tuple) register. The parsed list start is
  /// rebased from the Q/Z numbering used during parsing onto the tuple
  /// register class selected by RegTy and NumRegs.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Column 0 holds the base of the parsed numbering; columns 1-4 hold the
    // first register of each tuple size.
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1662 
  /// Append an SME tile list as its 8-bit register mask immediate.
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");
    Inst.addOperand(MCOperand::createImm(RegMask));
  }

  /// Append a vector lane index as an immediate.
  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1674 
  /// Append a one-bit immediate selecting which of the two exact FP values
  /// was written (0 when it matched ImmIs0, 1 when it matched ImmIs1).
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1681 
  /// Append the raw immediate expression.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }
1689 
  /// Append (value, shift) for an immediate that may carry an optional
  /// 'lsl #Shift'; constants are normalized via getShiftedVal, anything
  /// symbolic keeps its parsed shift (or 0).
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  /// Append (-value, shift) for a negative immediate being aliased onto the
  /// opposite instruction (e.g. ADD -> SUB). The matcher guarantees the
  /// operand already passed isAddSubImmNeg-style checks.
  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else
      llvm_unreachable("Not a shifted negative immediate");
  }
1714 
  /// Append the condition code as an immediate.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }
1719 
1720   void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1721     assert(N == 1 && "Invalid number of operands!");
1722     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1723     if (!MCE)
1724       addExpr(Inst, getImm());
1725     else
1726       Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1727   }
1728 
  // ADR needs no scaling of its target; defer to the generic immediate path.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1732 
1733   template<int Scale>
1734   void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1735     assert(N == 1 && "Invalid number of operands!");
1736     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1737 
1738     if (!MCE) {
1739       Inst.addOperand(MCOperand::createExpr(getImm()));
1740       return;
1741     }
1742     Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1743   }
1744 
  // Emits an unsigned 6-bit immediate; the cast asserts the operand is
  // already a constant expression.
  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }
1750 
  // Emits the constant immediate divided by the template Scale factor; the
  // cast asserts the operand is a constant expression.
  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }
1757 
1758   template <typename T>
1759   void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1760     assert(N == 1 && "Invalid number of operands!");
1761     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1762     std::make_unsigned_t<T> Val = MCE->getValue();
1763     uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1764     Inst.addOperand(MCOperand::createImm(encoding));
1765   }
1766 
1767   template <typename T>
1768   void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1769     assert(N == 1 && "Invalid number of operands!");
1770     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1771     std::make_unsigned_t<T> Val = ~MCE->getValue();
1772     uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1773     Inst.addOperand(MCOperand::createImm(encoding));
1774   }
1775 
1776   void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1777     assert(N == 1 && "Invalid number of operands!");
1778     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1779     uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1780     Inst.addOperand(MCOperand::createImm(encoding));
1781   }
1782 
1783   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1784     // Branch operands don't encode the low bits, so shift them off
1785     // here. If it's a label, however, just put it on directly as there's
1786     // not enough information now to do anything.
1787     assert(N == 1 && "Invalid number of operands!");
1788     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1789     if (!MCE) {
1790       addExpr(Inst, getImm());
1791       return;
1792     }
1793     assert(MCE && "Invalid constant immediate operand!");
1794     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1795   }
1796 
1797   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1798     // Branch operands don't encode the low bits, so shift them off
1799     // here. If it's a label, however, just put it on directly as there's
1800     // not enough information now to do anything.
1801     assert(N == 1 && "Invalid number of operands!");
1802     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1803     if (!MCE) {
1804       addExpr(Inst, getImm());
1805       return;
1806     }
1807     assert(MCE && "Invalid constant immediate operand!");
1808     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1809   }
1810 
1811   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1812     // Branch operands don't encode the low bits, so shift them off
1813     // here. If it's a label, however, just put it on directly as there's
1814     // not enough information now to do anything.
1815     assert(N == 1 && "Invalid number of operands!");
1816     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1817     if (!MCE) {
1818       addExpr(Inst, getImm());
1819       return;
1820     }
1821     assert(MCE && "Invalid constant immediate operand!");
1822     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1823   }
1824 
  // Emits the FP immediate encoded via AArch64_AM::getFP64Imm applied to
  // the raw bit pattern of the stored value.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(
        AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
  }
1830 
  // Emits the barrier option value as an immediate.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
1835 
  // Emits the barrier option value for an nXS-qualified barrier; the
  // payload is the same as addBarrierOperands.
  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }
1840 
  // Emits the system register's MRS (read) encoding.
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }
1846 
  // Emits the system register's MSR (write) encoding.
  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }
1852 
  // Emits the PState field encoding for MSR-immediate forms whose payload
  // is a 0/1 immediate.
  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1858 
  // Emits the SVCR (SME streaming-control) PState field encoding.
  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
  }
1864 
  // Emits the PState field encoding for MSR-immediate forms whose payload
  // is a 0-15 immediate.
  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }
1870 
  // Emits the system C-register index (the Cn/Cm field of SYS-class
  // instructions).
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }
1875 
  // Emits the prefetch operation encoding as an immediate.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }
1880 
  // Emits the PSB hint encoding as an immediate.
  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1885 
  // Emits the BTI hint encoding as an immediate.
  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBTIHint()));
  }
1890 
1891   void addShifterOperands(MCInst &Inst, unsigned N) const {
1892     assert(N == 1 && "Invalid number of operands!");
1893     unsigned Imm =
1894         AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1895     Inst.addOperand(MCOperand::createImm(Imm));
1896   }
1897 
1898   void addExtendOperands(MCInst &Inst, unsigned N) const {
1899     assert(N == 1 && "Invalid number of operands!");
1900     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1901     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1902     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1903     Inst.addOperand(MCOperand::createImm(Imm));
1904   }
1905 
1906   void addExtend64Operands(MCInst &Inst, unsigned N) const {
1907     assert(N == 1 && "Invalid number of operands!");
1908     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1909     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1910     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1911     Inst.addOperand(MCOperand::createImm(Imm));
1912   }
1913 
1914   void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1915     assert(N == 2 && "Invalid number of operands!");
1916     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1917     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1918     Inst.addOperand(MCOperand::createImm(IsSigned));
1919     Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1920   }
1921 
1922   // For 8-bit load/store instructions with a register offset, both the
1923   // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1924   // they're disambiguated by whether the shift was explicit or implicit rather
1925   // than its size.
1926   void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1927     assert(N == 2 && "Invalid number of operands!");
1928     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1929     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1930     Inst.addOperand(MCOperand::createImm(IsSigned));
1931     Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1932   }
1933 
1934   template<int Shift>
1935   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1936     assert(N == 1 && "Invalid number of operands!");
1937 
1938     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1939     if (CE) {
1940       uint64_t Value = CE->getValue();
1941       Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1942     } else {
1943       addExpr(Inst, getImm());
1944     }
1945   }
1946 
1947   template<int Shift>
1948   void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1949     assert(N == 1 && "Invalid number of operands!");
1950 
1951     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1952     uint64_t Value = CE->getValue();
1953     Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1954   }
1955 
  // Emits a complex rotation that is a multiple of 90 degrees, encoded as
  // rotation / 90.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
  }
1961 
  // Emits a complex rotation of 90 or 270 degrees, encoded as
  // (rotation - 90) / 180.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
  }
1967 
1968   void print(raw_ostream &OS) const override;
1969 
1970   static std::unique_ptr<AArch64Operand>
1971   CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
1972     auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1973     Op->Tok.Data = Str.data();
1974     Op->Tok.Length = Str.size();
1975     Op->Tok.IsSuffix = IsSuffix;
1976     Op->StartLoc = S;
1977     Op->EndLoc = S;
1978     return Op;
1979   }
1980 
1981   static std::unique_ptr<AArch64Operand>
1982   CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1983             RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1984             AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1985             unsigned ShiftAmount = 0,
1986             unsigned HasExplicitAmount = false) {
1987     auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1988     Op->Reg.RegNum = RegNum;
1989     Op->Reg.Kind = Kind;
1990     Op->Reg.ElementWidth = 0;
1991     Op->Reg.EqualityTy = EqTy;
1992     Op->Reg.ShiftExtend.Type = ExtTy;
1993     Op->Reg.ShiftExtend.Amount = ShiftAmount;
1994     Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1995     Op->StartLoc = S;
1996     Op->EndLoc = E;
1997     return Op;
1998   }
1999 
2000   static std::unique_ptr<AArch64Operand>
2001   CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2002                   SMLoc S, SMLoc E, MCContext &Ctx,
2003                   AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2004                   unsigned ShiftAmount = 0,
2005                   unsigned HasExplicitAmount = false) {
2006     assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2007             Kind == RegKind::SVEPredicateVector) &&
2008            "Invalid vector kind");
2009     auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2010                         HasExplicitAmount);
2011     Op->Reg.ElementWidth = ElementWidth;
2012     return Op;
2013   }
2014 
2015   static std::unique_ptr<AArch64Operand>
2016   CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
2017                    unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
2018                    MCContext &Ctx) {
2019     auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2020     Op->VectorList.RegNum = RegNum;
2021     Op->VectorList.Count = Count;
2022     Op->VectorList.NumElements = NumElements;
2023     Op->VectorList.ElementWidth = ElementWidth;
2024     Op->VectorList.RegisterKind = RegisterKind;
2025     Op->StartLoc = S;
2026     Op->EndLoc = E;
2027     return Op;
2028   }
2029 
2030   static std::unique_ptr<AArch64Operand>
2031   CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2032     auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2033     Op->VectorIndex.Val = Idx;
2034     Op->StartLoc = S;
2035     Op->EndLoc = E;
2036     return Op;
2037   }
2038 
2039   static std::unique_ptr<AArch64Operand>
2040   CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2041     auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2042     Op->MatrixTileList.RegMask = RegMask;
2043     Op->StartLoc = S;
2044     Op->EndLoc = E;
2045     return Op;
2046   }
2047 
2048   static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2049                                   const unsigned ElementWidth) {
2050     static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2051         RegMap = {
2052             {{0, AArch64::ZAB0},
2053              {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2054               AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2055             {{8, AArch64::ZAB0},
2056              {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2057               AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2058             {{16, AArch64::ZAH0},
2059              {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2060             {{16, AArch64::ZAH1},
2061              {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2062             {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2063             {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2064             {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2065             {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2066         };
2067 
2068     if (ElementWidth == 64)
2069       OutRegs.insert(Reg);
2070     else {
2071       std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2072       assert(!Regs.empty() && "Invalid tile or element width!");
2073       for (auto OutReg : Regs)
2074         OutRegs.insert(OutReg);
2075     }
2076   }
2077 
2078   static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2079                                                    SMLoc E, MCContext &Ctx) {
2080     auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2081     Op->Imm.Val = Val;
2082     Op->StartLoc = S;
2083     Op->EndLoc = E;
2084     return Op;
2085   }
2086 
2087   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2088                                                           unsigned ShiftAmount,
2089                                                           SMLoc S, SMLoc E,
2090                                                           MCContext &Ctx) {
2091     auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2092     Op->ShiftedImm .Val = Val;
2093     Op->ShiftedImm.ShiftAmount = ShiftAmount;
2094     Op->StartLoc = S;
2095     Op->EndLoc = E;
2096     return Op;
2097   }
2098 
2099   static std::unique_ptr<AArch64Operand>
2100   CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2101     auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2102     Op->CondCode.Code = Code;
2103     Op->StartLoc = S;
2104     Op->EndLoc = E;
2105     return Op;
2106   }
2107 
2108   static std::unique_ptr<AArch64Operand>
2109   CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2110     auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2111     Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2112     Op->FPImm.IsExact = IsExact;
2113     Op->StartLoc = S;
2114     Op->EndLoc = S;
2115     return Op;
2116   }
2117 
2118   static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2119                                                        StringRef Str,
2120                                                        SMLoc S,
2121                                                        MCContext &Ctx,
2122                                                        bool HasnXSModifier) {
2123     auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2124     Op->Barrier.Val = Val;
2125     Op->Barrier.Data = Str.data();
2126     Op->Barrier.Length = Str.size();
2127     Op->Barrier.HasnXSModifier = HasnXSModifier;
2128     Op->StartLoc = S;
2129     Op->EndLoc = S;
2130     return Op;
2131   }
2132 
2133   static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2134                                                       uint32_t MRSReg,
2135                                                       uint32_t MSRReg,
2136                                                       uint32_t PStateField,
2137                                                       MCContext &Ctx) {
2138     auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2139     Op->SysReg.Data = Str.data();
2140     Op->SysReg.Length = Str.size();
2141     Op->SysReg.MRSReg = MRSReg;
2142     Op->SysReg.MSRReg = MSRReg;
2143     Op->SysReg.PStateField = PStateField;
2144     Op->StartLoc = S;
2145     Op->EndLoc = S;
2146     return Op;
2147   }
2148 
2149   static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2150                                                      SMLoc E, MCContext &Ctx) {
2151     auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2152     Op->SysCRImm.Val = Val;
2153     Op->StartLoc = S;
2154     Op->EndLoc = E;
2155     return Op;
2156   }
2157 
2158   static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2159                                                         StringRef Str,
2160                                                         SMLoc S,
2161                                                         MCContext &Ctx) {
2162     auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2163     Op->Prefetch.Val = Val;
2164     Op->Barrier.Data = Str.data();
2165     Op->Barrier.Length = Str.size();
2166     Op->StartLoc = S;
2167     Op->EndLoc = S;
2168     return Op;
2169   }
2170 
2171   static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2172                                                        StringRef Str,
2173                                                        SMLoc S,
2174                                                        MCContext &Ctx) {
2175     auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2176     Op->PSBHint.Val = Val;
2177     Op->PSBHint.Data = Str.data();
2178     Op->PSBHint.Length = Str.size();
2179     Op->StartLoc = S;
2180     Op->EndLoc = S;
2181     return Op;
2182   }
2183 
2184   static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2185                                                        StringRef Str,
2186                                                        SMLoc S,
2187                                                        MCContext &Ctx) {
2188     auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2189     Op->BTIHint.Val = Val | 32;
2190     Op->BTIHint.Data = Str.data();
2191     Op->BTIHint.Length = Str.size();
2192     Op->StartLoc = S;
2193     Op->EndLoc = S;
2194     return Op;
2195   }
2196 
2197   static std::unique_ptr<AArch64Operand>
2198   CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2199                        SMLoc S, SMLoc E, MCContext &Ctx) {
2200     auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2201     Op->MatrixReg.RegNum = RegNum;
2202     Op->MatrixReg.ElementWidth = ElementWidth;
2203     Op->MatrixReg.Kind = Kind;
2204     Op->StartLoc = S;
2205     Op->EndLoc = E;
2206     return Op;
2207   }
2208 
2209   static std::unique_ptr<AArch64Operand>
2210   CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2211     auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2212     Op->SVCR.PStateField = PStateField;
2213     Op->SVCR.Data = Str.data();
2214     Op->SVCR.Length = Str.size();
2215     Op->StartLoc = S;
2216     Op->EndLoc = S;
2217     return Op;
2218   }
2219 
2220   static std::unique_ptr<AArch64Operand>
2221   CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2222                     bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2223     auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2224     Op->ShiftExtend.Type = ShOp;
2225     Op->ShiftExtend.Amount = Val;
2226     Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2227     Op->StartLoc = S;
2228     Op->EndLoc = E;
2229     return Op;
2230   }
2231 };
2232 
2233 } // end anonymous namespace.
2234 
// Debug dump of the operand: one human-readable form per operand kind.
// Not used for instruction emission, so the exact spellings are only a
// debugging aid.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    // Print the 8-bit tile mask as binary, most-significant bit first.
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    // A register with a shift/extend modifier falls through to also print
    // the modifier.
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    LLVM_FALLTHROUGH;
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2328 
2329 /// @name Auto-generated Match Functions
2330 /// {
2331 
2332 static unsigned MatchRegisterName(StringRef Name);
2333 
2334 /// }
2335 
// Maps a NEON vector register name ("v0".."v31", case-insensitive) to the
// corresponding Q-register enum value, or 0 when the name does not match.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2372 
/// Returns an optional pair of (#elements, element-width) if Suffix
/// is a valid vector kind. Where the number of elements in a vector
/// or the vector width is implicit or explicitly unknown (but still a
/// valid suffix kind), 0 is used.
///
/// Suffix comparison is case-insensitive. The empty suffix is accepted for
/// all supported register kinds and yields {0, 0}.
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  // {-1, -1} is the internal "no match" sentinel, converted to an empty
  // Optional before returning.
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
    Res =
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case("", {0, 0})
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            // operand
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".b", {0, 8})
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
            .Default({-1, -1})
    break;
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
    // SVE and SME suffixes carry only an element width; element counts are
    // implicit (scalable), so the first pair member is always 0.
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case("", {0, 0})
              .Case(".b", {0, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
              .Default({-1, -1});
    break;
  default:
    llvm_unreachable("Unsupported RegKind");
  }

  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);
}
2430 
2431 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2432   return parseVectorKind(Suffix, VectorKind).hasValue();
2433 }
2434 
// Maps an SVE data vector register name ("z0".."z31", case-insensitive) to
// the corresponding Z-register enum value, or 0 when the name does not match.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2471 
// Maps an SVE predicate register name ("p0".."p15", case-insensitive) to
// the corresponding P-register enum value, or 0 when the name does not match.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2492 
// Maps an SME tile name usable inside a tile list ("zaN.d/.s/.h/.b",
// case-insensitive) to the corresponding ZA tile enum value, or 0 when the
// name does not match. Note: unlike matchMatrixRegName, plain "za" and
// ".q" tiles are not accepted here.
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2512 
2513 static unsigned matchMatrixRegName(StringRef Name) {
2514   return StringSwitch<unsigned>(Name.lower())
2515       .Case("za", AArch64::ZA)
2516       .Case("za0.q", AArch64::ZAQ0)
2517       .Case("za1.q", AArch64::ZAQ1)
2518       .Case("za2.q", AArch64::ZAQ2)
2519       .Case("za3.q", AArch64::ZAQ3)
2520       .Case("za4.q", AArch64::ZAQ4)
2521       .Case("za5.q", AArch64::ZAQ5)
2522       .Case("za6.q", AArch64::ZAQ6)
2523       .Case("za7.q", AArch64::ZAQ7)
2524       .Case("za8.q", AArch64::ZAQ8)
2525       .Case("za9.q", AArch64::ZAQ9)
2526       .Case("za10.q", AArch64::ZAQ10)
2527       .Case("za11.q", AArch64::ZAQ11)
2528       .Case("za12.q", AArch64::ZAQ12)
2529       .Case("za13.q", AArch64::ZAQ13)
2530       .Case("za14.q", AArch64::ZAQ14)
2531       .Case("za15.q", AArch64::ZAQ15)
2532       .Case("za0.d", AArch64::ZAD0)
2533       .Case("za1.d", AArch64::ZAD1)
2534       .Case("za2.d", AArch64::ZAD2)
2535       .Case("za3.d", AArch64::ZAD3)
2536       .Case("za4.d", AArch64::ZAD4)
2537       .Case("za5.d", AArch64::ZAD5)
2538       .Case("za6.d", AArch64::ZAD6)
2539       .Case("za7.d", AArch64::ZAD7)
2540       .Case("za0.s", AArch64::ZAS0)
2541       .Case("za1.s", AArch64::ZAS1)
2542       .Case("za2.s", AArch64::ZAS2)
2543       .Case("za3.s", AArch64::ZAS3)
2544       .Case("za0.h", AArch64::ZAH0)
2545       .Case("za1.h", AArch64::ZAH1)
2546       .Case("za0.b", AArch64::ZAB0)
2547       .Case("za0h.q", AArch64::ZAQ0)
2548       .Case("za1h.q", AArch64::ZAQ1)
2549       .Case("za2h.q", AArch64::ZAQ2)
2550       .Case("za3h.q", AArch64::ZAQ3)
2551       .Case("za4h.q", AArch64::ZAQ4)
2552       .Case("za5h.q", AArch64::ZAQ5)
2553       .Case("za6h.q", AArch64::ZAQ6)
2554       .Case("za7h.q", AArch64::ZAQ7)
2555       .Case("za8h.q", AArch64::ZAQ8)
2556       .Case("za9h.q", AArch64::ZAQ9)
2557       .Case("za10h.q", AArch64::ZAQ10)
2558       .Case("za11h.q", AArch64::ZAQ11)
2559       .Case("za12h.q", AArch64::ZAQ12)
2560       .Case("za13h.q", AArch64::ZAQ13)
2561       .Case("za14h.q", AArch64::ZAQ14)
2562       .Case("za15h.q", AArch64::ZAQ15)
2563       .Case("za0h.d", AArch64::ZAD0)
2564       .Case("za1h.d", AArch64::ZAD1)
2565       .Case("za2h.d", AArch64::ZAD2)
2566       .Case("za3h.d", AArch64::ZAD3)
2567       .Case("za4h.d", AArch64::ZAD4)
2568       .Case("za5h.d", AArch64::ZAD5)
2569       .Case("za6h.d", AArch64::ZAD6)
2570       .Case("za7h.d", AArch64::ZAD7)
2571       .Case("za0h.s", AArch64::ZAS0)
2572       .Case("za1h.s", AArch64::ZAS1)
2573       .Case("za2h.s", AArch64::ZAS2)
2574       .Case("za3h.s", AArch64::ZAS3)
2575       .Case("za0h.h", AArch64::ZAH0)
2576       .Case("za1h.h", AArch64::ZAH1)
2577       .Case("za0h.b", AArch64::ZAB0)
2578       .Case("za0v.q", AArch64::ZAQ0)
2579       .Case("za1v.q", AArch64::ZAQ1)
2580       .Case("za2v.q", AArch64::ZAQ2)
2581       .Case("za3v.q", AArch64::ZAQ3)
2582       .Case("za4v.q", AArch64::ZAQ4)
2583       .Case("za5v.q", AArch64::ZAQ5)
2584       .Case("za6v.q", AArch64::ZAQ6)
2585       .Case("za7v.q", AArch64::ZAQ7)
2586       .Case("za8v.q", AArch64::ZAQ8)
2587       .Case("za9v.q", AArch64::ZAQ9)
2588       .Case("za10v.q", AArch64::ZAQ10)
2589       .Case("za11v.q", AArch64::ZAQ11)
2590       .Case("za12v.q", AArch64::ZAQ12)
2591       .Case("za13v.q", AArch64::ZAQ13)
2592       .Case("za14v.q", AArch64::ZAQ14)
2593       .Case("za15v.q", AArch64::ZAQ15)
2594       .Case("za0v.d", AArch64::ZAD0)
2595       .Case("za1v.d", AArch64::ZAD1)
2596       .Case("za2v.d", AArch64::ZAD2)
2597       .Case("za3v.d", AArch64::ZAD3)
2598       .Case("za4v.d", AArch64::ZAD4)
2599       .Case("za5v.d", AArch64::ZAD5)
2600       .Case("za6v.d", AArch64::ZAD6)
2601       .Case("za7v.d", AArch64::ZAD7)
2602       .Case("za0v.s", AArch64::ZAS0)
2603       .Case("za1v.s", AArch64::ZAS1)
2604       .Case("za2v.s", AArch64::ZAS2)
2605       .Case("za3v.s", AArch64::ZAS3)
2606       .Case("za0v.h", AArch64::ZAH0)
2607       .Case("za1v.h", AArch64::ZAH1)
2608       .Case("za0v.b", AArch64::ZAB0)
2609       .Default(0);
2610 }
2611 
2612 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2613                                      SMLoc &EndLoc) {
2614   return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2615 }
2616 
2617 OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2618                                                         SMLoc &StartLoc,
2619                                                         SMLoc &EndLoc) {
2620   StartLoc = getLoc();
2621   auto Res = tryParseScalarRegister(RegNo);
2622   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2623   return Res;
2624 }
2625 
2626 // Matches a register name or register alias previously defined by '.req'
2627 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2628                                                   RegKind Kind) {
2629   unsigned RegNum = 0;
2630   if ((RegNum = matchSVEDataVectorRegName(Name)))
2631     return Kind == RegKind::SVEDataVector ? RegNum : 0;
2632 
2633   if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2634     return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2635 
2636   if ((RegNum = MatchNeonVectorRegName(Name)))
2637     return Kind == RegKind::NeonVector ? RegNum : 0;
2638 
2639   if ((RegNum = matchMatrixRegName(Name)))
2640     return Kind == RegKind::Matrix ? RegNum : 0;
2641 
2642   // The parsed register must be of RegKind Scalar
2643   if ((RegNum = MatchRegisterName(Name)))
2644     return Kind == RegKind::Scalar ? RegNum : 0;
2645 
2646   if (!RegNum) {
2647     // Handle a few common aliases of registers.
2648     if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2649                     .Case("fp", AArch64::FP)
2650                     .Case("lr",  AArch64::LR)
2651                     .Case("x31", AArch64::XZR)
2652                     .Case("w31", AArch64::WZR)
2653                     .Default(0))
2654       return Kind == RegKind::Scalar ? RegNum : 0;
2655 
2656     // Check for aliases registered via .req. Canonicalize to lower case.
2657     // That's more consistent since register names are case insensitive, and
2658     // it's how the original entry was passed in from MC/MCParser/AsmParser.
2659     auto Entry = RegisterReqs.find(Name.lower());
2660     if (Entry == RegisterReqs.end())
2661       return 0;
2662 
2663     // set RegNum if the match is the right kind of register
2664     if (Kind == Entry->getValue().first)
2665       RegNum = Entry->getValue().second;
2666   }
2667   return RegNum;
2668 }
2669 
2670 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2671 /// Identifier when called, and if it is a register name the token is eaten and
2672 /// the register is added to the operand list.
2673 OperandMatchResultTy
2674 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2675   MCAsmParser &Parser = getParser();
2676   const AsmToken &Tok = Parser.getTok();
2677   if (Tok.isNot(AsmToken::Identifier))
2678     return MatchOperand_NoMatch;
2679 
2680   std::string lowerCase = Tok.getString().lower();
2681   unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2682   if (Reg == 0)
2683     return MatchOperand_NoMatch;
2684 
2685   RegNum = Reg;
2686   Parser.Lex(); // Eat identifier token.
2687   return MatchOperand_Success;
2688 }
2689 
2690 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2691 OperandMatchResultTy
2692 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2693   MCAsmParser &Parser = getParser();
2694   SMLoc S = getLoc();
2695 
2696   if (Parser.getTok().isNot(AsmToken::Identifier)) {
2697     Error(S, "Expected cN operand where 0 <= N <= 15");
2698     return MatchOperand_ParseFail;
2699   }
2700 
2701   StringRef Tok = Parser.getTok().getIdentifier();
2702   if (Tok[0] != 'c' && Tok[0] != 'C') {
2703     Error(S, "Expected cN operand where 0 <= N <= 15");
2704     return MatchOperand_ParseFail;
2705   }
2706 
2707   uint32_t CRNum;
2708   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2709   if (BadNum || CRNum > 15) {
2710     Error(S, "Expected cN operand where 0 <= N <= 15");
2711     return MatchOperand_ParseFail;
2712   }
2713 
2714   Parser.Lex(); // Eat identifier token.
2715   Operands.push_back(
2716       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2717   return MatchOperand_Success;
2718 }
2719 
2720 /// tryParsePrefetch - Try to parse a prefetch operand.
2721 template <bool IsSVEPrefetch>
2722 OperandMatchResultTy
2723 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2724   MCAsmParser &Parser = getParser();
2725   SMLoc S = getLoc();
2726   const AsmToken &Tok = Parser.getTok();
2727 
2728   auto LookupByName = [](StringRef N) {
2729     if (IsSVEPrefetch) {
2730       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2731         return Optional<unsigned>(Res->Encoding);
2732     } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2733       return Optional<unsigned>(Res->Encoding);
2734     return Optional<unsigned>();
2735   };
2736 
2737   auto LookupByEncoding = [](unsigned E) {
2738     if (IsSVEPrefetch) {
2739       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2740         return Optional<StringRef>(Res->Name);
2741     } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2742       return Optional<StringRef>(Res->Name);
2743     return Optional<StringRef>();
2744   };
2745   unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2746 
2747   // Either an identifier for named values or a 5-bit immediate.
2748   // Eat optional hash.
2749   if (parseOptionalToken(AsmToken::Hash) ||
2750       Tok.is(AsmToken::Integer)) {
2751     const MCExpr *ImmVal;
2752     if (getParser().parseExpression(ImmVal))
2753       return MatchOperand_ParseFail;
2754 
2755     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2756     if (!MCE) {
2757       TokError("immediate value expected for prefetch operand");
2758       return MatchOperand_ParseFail;
2759     }
2760     unsigned prfop = MCE->getValue();
2761     if (prfop > MaxVal) {
2762       TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2763                "] expected");
2764       return MatchOperand_ParseFail;
2765     }
2766 
2767     auto PRFM = LookupByEncoding(MCE->getValue());
2768     Operands.push_back(AArch64Operand::CreatePrefetch(
2769         prfop, PRFM.getValueOr(""), S, getContext()));
2770     return MatchOperand_Success;
2771   }
2772 
2773   if (Tok.isNot(AsmToken::Identifier)) {
2774     TokError("prefetch hint expected");
2775     return MatchOperand_ParseFail;
2776   }
2777 
2778   auto PRFM = LookupByName(Tok.getString());
2779   if (!PRFM) {
2780     TokError("prefetch hint expected");
2781     return MatchOperand_ParseFail;
2782   }
2783 
2784   Operands.push_back(AArch64Operand::CreatePrefetch(
2785       *PRFM, Tok.getString(), S, getContext()));
2786   Parser.Lex(); // Eat identifier token.
2787   return MatchOperand_Success;
2788 }
2789 
2790 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2791 OperandMatchResultTy
2792 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2793   MCAsmParser &Parser = getParser();
2794   SMLoc S = getLoc();
2795   const AsmToken &Tok = Parser.getTok();
2796   if (Tok.isNot(AsmToken::Identifier)) {
2797     TokError("invalid operand for instruction");
2798     return MatchOperand_ParseFail;
2799   }
2800 
2801   auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2802   if (!PSB) {
2803     TokError("invalid operand for instruction");
2804     return MatchOperand_ParseFail;
2805   }
2806 
2807   Operands.push_back(AArch64Operand::CreatePSBHint(
2808       PSB->Encoding, Tok.getString(), S, getContext()));
2809   Parser.Lex(); // Eat identifier token.
2810   return MatchOperand_Success;
2811 }
2812 
2813 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
2814 OperandMatchResultTy
2815 AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
2816   MCAsmParser &Parser = getParser();
2817   SMLoc S = getLoc();
2818   const AsmToken &Tok = Parser.getTok();
2819   if (Tok.isNot(AsmToken::Identifier)) {
2820     TokError("invalid operand for instruction");
2821     return MatchOperand_ParseFail;
2822   }
2823 
2824   auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
2825   if (!BTI) {
2826     TokError("invalid operand for instruction");
2827     return MatchOperand_ParseFail;
2828   }
2829 
2830   Operands.push_back(AArch64Operand::CreateBTIHint(
2831       BTI->Encoding, Tok.getString(), S, getContext()));
2832   Parser.Lex(); // Eat identifier token.
2833   return MatchOperand_Success;
2834 }
2835 
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction. The label may carry a page-granular relocation modifier
/// (e.g. @page/@gotpage on Darwin, :got: style specifiers on ELF); a
/// completely unmodified symbol is treated as a basic ELF ADRP relocation.
OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An immediate-style '#' prefix is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  // If the expression can't be classified as a symbol reference, skip the
  // modifier validation below and emit the expression as-is.
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin @gotpage/@tlvppage references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2888 
2889 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2890 /// instruction.
2891 OperandMatchResultTy
2892 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2893   SMLoc S = getLoc();
2894   const MCExpr *Expr = nullptr;
2895 
2896   // Leave anything with a bracket to the default for SVE
2897   if (getParser().getTok().is(AsmToken::LBrac))
2898     return MatchOperand_NoMatch;
2899 
2900   if (getParser().getTok().is(AsmToken::Hash))
2901     getParser().Lex(); // Eat hash token.
2902 
2903   if (parseSymbolicImmVal(Expr))
2904     return MatchOperand_ParseFail;
2905 
2906   AArch64MCExpr::VariantKind ELFRefKind;
2907   MCSymbolRefExpr::VariantKind DarwinRefKind;
2908   int64_t Addend;
2909   if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2910     if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2911         ELFRefKind == AArch64MCExpr::VK_INVALID) {
2912       // No modifier was specified at all; this is the syntax for an ELF basic
2913       // ADR relocation (unfortunately).
2914       Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
2915     } else {
2916       Error(S, "unexpected adr label");
2917       return MatchOperand_ParseFail;
2918     }
2919   }
2920 
2921   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2922   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2923   return MatchOperand_Success;
2924 }
2925 
/// tryParseFPImm - A floating point immediate expression operand.
///
/// Accepts either an (optionally '#'-prefixed, optionally negated) FP
/// literal, or a "0x"-prefixed integer interpreted as the 8-bit encoded
/// form of an FP immediate (decoded via AArch64_AM::getFPImmFloat).
template<bool AddFPZeroAsLiteral>
OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this simply isn't an FP immediate; with one,
    // nothing else is possible, so it's a hard parse error.
    if (!Hash)
      return MatchOperand_NoMatch;
    TokError("invalid floating point immediate");
    return MatchOperand_ParseFail;
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The encoded form is a single byte, and a negated hex form is invalid.
    if (Tok.getIntVal() > 255 || isNegative) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }

    // Decode the byte back into the FP value it encodes; 'true' marks the
    // operand as an exact representation.
    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    if (errorToBool(StatusOrErr.takeError())) {
      TokError("invalid floating point representation");
      return MatchOperand_ParseFail;
    }

    // The '-' arrived as a separate token; fold it into the value now.
    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Represent +0.0 as the two literal tokens "#0" ".0" rather than as
      // an FP-immediate operand.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      // The second argument records whether the conversion was exact
      // (opOK means no rounding/inexactness occurred).
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Parser.Lex(); // Eat the token.

  return MatchOperand_Success;
}
2981 
2982 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2983 /// a shift suffix, for example '#1, lsl #12'.
2984 OperandMatchResultTy
2985 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2986   MCAsmParser &Parser = getParser();
2987   SMLoc S = getLoc();
2988 
2989   if (Parser.getTok().is(AsmToken::Hash))
2990     Parser.Lex(); // Eat '#'
2991   else if (Parser.getTok().isNot(AsmToken::Integer))
2992     // Operand should start from # or should be integer, emit error otherwise.
2993     return MatchOperand_NoMatch;
2994 
2995   const MCExpr *Imm = nullptr;
2996   if (parseSymbolicImmVal(Imm))
2997     return MatchOperand_ParseFail;
2998   else if (Parser.getTok().isNot(AsmToken::Comma)) {
2999     Operands.push_back(
3000         AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3001     return MatchOperand_Success;
3002   }
3003 
3004   // Eat ','
3005   Parser.Lex();
3006 
3007   // The optional operand must be "lsl #N" where N is non-negative.
3008   if (!Parser.getTok().is(AsmToken::Identifier) ||
3009       !Parser.getTok().getIdentifier().equals_insensitive("lsl")) {
3010     Error(getLoc(), "only 'lsl #+N' valid after immediate");
3011     return MatchOperand_ParseFail;
3012   }
3013 
3014   // Eat 'lsl'
3015   Parser.Lex();
3016 
3017   parseOptionalToken(AsmToken::Hash);
3018 
3019   if (Parser.getTok().isNot(AsmToken::Integer)) {
3020     Error(getLoc(), "only 'lsl #+N' valid after immediate");
3021     return MatchOperand_ParseFail;
3022   }
3023 
3024   int64_t ShiftAmount = Parser.getTok().getIntVal();
3025 
3026   if (ShiftAmount < 0) {
3027     Error(getLoc(), "positive shift amount required");
3028     return MatchOperand_ParseFail;
3029   }
3030   Parser.Lex(); // Eat the number
3031 
3032   // Just in case the optional lsl #0 is used for immediates other than zero.
3033   if (ShiftAmount == 0 && Imm != nullptr) {
3034     Operands.push_back(
3035         AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3036     return MatchOperand_Success;
3037   }
3038 
3039   Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3040                                                       getLoc(), getContext()));
3041   return MatchOperand_Success;
3042 }
3043 
3044 /// parseCondCodeString - Parse a Condition Code string.
3045 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
3046   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3047                     .Case("eq", AArch64CC::EQ)
3048                     .Case("ne", AArch64CC::NE)
3049                     .Case("cs", AArch64CC::HS)
3050                     .Case("hs", AArch64CC::HS)
3051                     .Case("cc", AArch64CC::LO)
3052                     .Case("lo", AArch64CC::LO)
3053                     .Case("mi", AArch64CC::MI)
3054                     .Case("pl", AArch64CC::PL)
3055                     .Case("vs", AArch64CC::VS)
3056                     .Case("vc", AArch64CC::VC)
3057                     .Case("hi", AArch64CC::HI)
3058                     .Case("ls", AArch64CC::LS)
3059                     .Case("ge", AArch64CC::GE)
3060                     .Case("lt", AArch64CC::LT)
3061                     .Case("gt", AArch64CC::GT)
3062                     .Case("le", AArch64CC::LE)
3063                     .Case("al", AArch64CC::AL)
3064                     .Case("nv", AArch64CC::NV)
3065                     .Default(AArch64CC::Invalid);
3066 
3067   if (CC == AArch64CC::Invalid &&
3068       getSTI().getFeatureBits()[AArch64::FeatureSVE])
3069     CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3070                     .Case("none",  AArch64CC::EQ)
3071                     .Case("any",   AArch64CC::NE)
3072                     .Case("nlast", AArch64CC::HS)
3073                     .Case("last",  AArch64CC::LO)
3074                     .Case("first", AArch64CC::MI)
3075                     .Case("nfrst", AArch64CC::PL)
3076                     .Case("pmore", AArch64CC::HI)
3077                     .Case("plast", AArch64CC::LS)
3078                     .Case("tcont", AArch64CC::GE)
3079                     .Case("tstop", AArch64CC::LT)
3080                     .Default(AArch64CC::Invalid);
3081 
3082   return CC;
3083 }
3084 
3085 /// parseCondCode - Parse a Condition Code operand.
3086 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3087                                      bool invertCondCode) {
3088   MCAsmParser &Parser = getParser();
3089   SMLoc S = getLoc();
3090   const AsmToken &Tok = Parser.getTok();
3091   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3092 
3093   StringRef Cond = Tok.getString();
3094   AArch64CC::CondCode CC = parseCondCodeString(Cond);
3095   if (CC == AArch64CC::Invalid)
3096     return TokError("invalid condition code");
3097   Parser.Lex(); // Eat identifier token.
3098 
3099   if (invertCondCode) {
3100     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3101       return TokError("condition codes AL and NV are invalid for this instruction");
3102     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3103   }
3104 
3105   Operands.push_back(
3106       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3107   return false;
3108 }
3109 
3110 OperandMatchResultTy
3111 AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3112   MCAsmParser &Parser = getParser();
3113   const AsmToken &Tok = Parser.getTok();
3114   SMLoc S = getLoc();
3115 
3116   if (Tok.isNot(AsmToken::Identifier)) {
3117     TokError("invalid operand for instruction");
3118     return MatchOperand_ParseFail;
3119   }
3120 
3121   unsigned PStateImm = -1;
3122   const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3123   if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3124     PStateImm = SVCR->Encoding;
3125 
3126   Operands.push_back(
3127       AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3128   Parser.Lex(); // Eat identifier token.
3129   return MatchOperand_Success;
3130 }
3131 
3132 OperandMatchResultTy
3133 AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3134   MCAsmParser &Parser = getParser();
3135   const AsmToken &Tok = Parser.getTok();
3136   SMLoc S = getLoc();
3137 
3138   StringRef Name = Tok.getString();
3139 
3140   if (Name.equals_insensitive("za")) {
3141     Parser.Lex(); // eat "za"
3142     Operands.push_back(AArch64Operand::CreateMatrixRegister(
3143         AArch64::ZA, /*ElementWidth=*/0, MatrixKind::Array, S, getLoc(),
3144         getContext()));
3145     if (getLexer().is(AsmToken::LBrac)) {
3146       // There's no comma after matrix operand, so we can parse the next operand
3147       // immediately.
3148       if (parseOperand(Operands, false, false))
3149         return MatchOperand_NoMatch;
3150     }
3151     return MatchOperand_Success;
3152   }
3153 
3154   // Try to parse matrix register.
3155   unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3156   if (!Reg)
3157     return MatchOperand_NoMatch;
3158 
3159   size_t DotPosition = Name.find('.');
3160   assert(DotPosition != StringRef::npos && "Unexpected register");
3161 
3162   StringRef Head = Name.take_front(DotPosition);
3163   StringRef Tail = Name.drop_front(DotPosition);
3164   StringRef RowOrColumn = Head.take_back();
3165 
3166   MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn)
3167                         .Case("h", MatrixKind::Row)
3168                         .Case("v", MatrixKind::Col)
3169                         .Default(MatrixKind::Tile);
3170 
3171   // Next up, parsing the suffix
3172   const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3173   if (!KindRes) {
3174     TokError("Expected the register to be followed by element width suffix");
3175     return MatchOperand_ParseFail;
3176   }
3177   unsigned ElementWidth = KindRes->second;
3178 
3179   Parser.Lex();
3180 
3181   Operands.push_back(AArch64Operand::CreateMatrixRegister(
3182       Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3183 
3184   if (getLexer().is(AsmToken::LBrac)) {
3185     // There's no comma after matrix operand, so we can parse the next operand
3186     // immediately.
3187     if (parseOperand(Operands, false, false))
3188       return MatchOperand_NoMatch;
3189   }
3190   return MatchOperand_Success;
3191 }
3192 
3193 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
3194 /// them if present.
3195 OperandMatchResultTy
3196 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3197   MCAsmParser &Parser = getParser();
3198   const AsmToken &Tok = Parser.getTok();
3199   std::string LowerID = Tok.getString().lower();
3200   AArch64_AM::ShiftExtendType ShOp =
3201       StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3202           .Case("lsl", AArch64_AM::LSL)
3203           .Case("lsr", AArch64_AM::LSR)
3204           .Case("asr", AArch64_AM::ASR)
3205           .Case("ror", AArch64_AM::ROR)
3206           .Case("msl", AArch64_AM::MSL)
3207           .Case("uxtb", AArch64_AM::UXTB)
3208           .Case("uxth", AArch64_AM::UXTH)
3209           .Case("uxtw", AArch64_AM::UXTW)
3210           .Case("uxtx", AArch64_AM::UXTX)
3211           .Case("sxtb", AArch64_AM::SXTB)
3212           .Case("sxth", AArch64_AM::SXTH)
3213           .Case("sxtw", AArch64_AM::SXTW)
3214           .Case("sxtx", AArch64_AM::SXTX)
3215           .Default(AArch64_AM::InvalidShiftExtend);
3216 
3217   if (ShOp == AArch64_AM::InvalidShiftExtend)
3218     return MatchOperand_NoMatch;
3219 
3220   SMLoc S = Tok.getLoc();
3221   Parser.Lex();
3222 
3223   bool Hash = parseOptionalToken(AsmToken::Hash);
3224 
3225   if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3226     if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3227         ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3228         ShOp == AArch64_AM::MSL) {
3229       // We expect a number here.
3230       TokError("expected #imm after shift specifier");
3231       return MatchOperand_ParseFail;
3232     }
3233 
3234     // "extend" type operations don't need an immediate, #0 is implicit.
3235     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3236     Operands.push_back(
3237         AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3238     return MatchOperand_Success;
3239   }
3240 
3241   // Make sure we do actually have a number, identifier or a parenthesized
3242   // expression.
3243   SMLoc E = getLoc();
3244   if (!Parser.getTok().is(AsmToken::Integer) &&
3245       !Parser.getTok().is(AsmToken::LParen) &&
3246       !Parser.getTok().is(AsmToken::Identifier)) {
3247     Error(E, "expected integer shift amount");
3248     return MatchOperand_ParseFail;
3249   }
3250 
3251   const MCExpr *ImmVal;
3252   if (getParser().parseExpression(ImmVal))
3253     return MatchOperand_ParseFail;
3254 
3255   const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3256   if (!MCE) {
3257     Error(E, "expected constant '#imm' after shift specifier");
3258     return MatchOperand_ParseFail;
3259   }
3260 
3261   E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3262   Operands.push_back(AArch64Operand::CreateShiftExtend(
3263       ShOp, MCE->getValue(), true, S, E, getContext()));
3264   return MatchOperand_Success;
3265 }
3266 
// Table of architecture-extension names and the subtarget feature bits each
// one implies; consulted by setRequiredFeatureString below to describe which
// extension a mismatched instruction needs. Some names are aliases for the
// same feature (e.g. "mte" and "memtag"). Entry order is preserved as-is.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64", {AArch64::FeatureSMEF64}},
    {"sme-i64", {AArch64::FeatureSMEI64}},
    // FIXME: Unsupported extensions
    {"lor", {}},
    {"rdma", {}},
    {"profile", {}},
};
3310 
3311 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3312   if (FBS[AArch64::HasV8_1aOps])
3313     Str += "ARMv8.1a";
3314   else if (FBS[AArch64::HasV8_2aOps])
3315     Str += "ARMv8.2a";
3316   else if (FBS[AArch64::HasV8_3aOps])
3317     Str += "ARMv8.3a";
3318   else if (FBS[AArch64::HasV8_4aOps])
3319     Str += "ARMv8.4a";
3320   else if (FBS[AArch64::HasV8_5aOps])
3321     Str += "ARMv8.5a";
3322   else if (FBS[AArch64::HasV8_6aOps])
3323     Str += "ARMv8.6a";
3324   else if (FBS[AArch64::HasV8_7aOps])
3325     Str += "ARMv8.7a";
3326   else {
3327     SmallVector<std::string, 2> ExtMatches;
3328     for (const auto& Ext : ExtensionMap) {
3329       // Use & in case multiple features are enabled
3330       if ((FBS & Ext.Features) != FeatureBitset())
3331         ExtMatches.push_back(Ext.Name);
3332     }
3333     Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3334   }
3335 }
3336 
3337 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3338                                       SMLoc S) {
3339   const uint16_t Op2 = Encoding & 7;
3340   const uint16_t Cm = (Encoding & 0x78) >> 3;
3341   const uint16_t Cn = (Encoding & 0x780) >> 7;
3342   const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3343 
3344   const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3345 
3346   Operands.push_back(
3347       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3348   Operands.push_back(
3349       AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3350   Operands.push_back(
3351       AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3352   Expr = MCConstantExpr::create(Op2, getContext());
3353   Operands.push_back(
3354       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3355 }
3356 
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Also handles the prediction-restriction aliases CFP/DVP/CPP. On success the
/// operand list holds a "sys" token followed by the decoded op1/Cn/Cm/op2
/// operands (via createSysAlias) and, when present, a trailing register.
/// Returns true on error (diagnostic already emitted).
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  // None of these mnemonics take a '.'-suffixed form.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // Emit the canonical "sys" mnemonic token; decoded operands follow.
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Dispatch on the alias mnemonic; each one has its own operand-name table.
  // In every branch, a known operand whose required features are missing from
  // the subtarget yields a "<MNEMONIC> <op> requires: <features>" diagnostic.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp") {
    const AArch64PRCTX::PRCTX *PRCTX = AArch64PRCTX::lookupPRCTXByName(Op);
    if (!PRCTX)
      return TokError("invalid operand for prediction restriction instruction");
    else if (!PRCTX->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str(
          Mnemonic.upper() + std::string(PRCTX->Name) + " requires: ");
      setRequiredFeatureString(PRCTX->getRequiredFeatures(), Str);
      return TokError(Str.c_str());
    }
    // The op2 field distinguishes the three prediction-restriction aliases;
    // the table encoding supplies only the upper fields.
    uint16_t PRCTX_Op2 =
      Mnemonic == "cfp" ? 4 :
      Mnemonic == "dvp" ? 5 :
      Mnemonic == "cpp" ? 7 :
      0;
    assert(PRCTX_Op2 && "Invalid mnemonic for prediction restriction instruction");
    createSysAlias(PRCTX->Encoding << 3 | PRCTX_Op2 , Operands, S);
  }

  Parser.Lex(); // Eat operand.

  // Operand names containing "all" (e.g. whole-cache / all-entries forms)
  // take no register; every other form requires one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3453 
/// Parse the operand of a barrier instruction (e.g. DSB/DMB/ISB/TSB): either
/// an immediate in [0, 15] or a named barrier option. For "dsb", out-of-range
/// immediates and unknown names return NoMatch (with lexer state restored for
/// the immediate case) so the nXS variant parser can be tried next.
OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    AsmToken IntTok = Tok; // Saved so the token can be pushed back for nXS.
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return MatchOperand_NoMatch;
    }
    if (Value < 0 || Value > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Record the immediate with its symbolic name when one exists.
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Named barrier option: resolve against both the TSB and DB name tables.
  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  // The only valid named option for TSB is 'csync'
  } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
    TokError("'csync' operand expected");
    return MatchOperand_ParseFail;
  } else if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return MatchOperand_NoMatch;
    }
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option
3526 
/// Parse the operand of a DSB instruction in its nXS variant: either one of
/// the immediates 16/20/24/28 or a named nXS barrier option. Only reached for
/// "dsb" (asserted below), typically after tryParseBarrierOperand returned
/// NoMatch.
OperandMatchResultTy
AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  // Defensive release-build guard mirroring the assert above.
  if (Mnemonic != "dsb")
    return MatchOperand_ParseFail;

  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    int64_t Value = MCE->getValue();
    // v8.7-A DSB in the nXS variant accepts only the following immediate
    // values: 16, 20, 24, 28.
    if (Value != 16 && Value != 20 && Value != 24 && Value != 28) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // The range check above guarantees the lookup succeeds.
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
                                                     ExprLoc, getContext(),
                                                     true /*hasnXSModifier*/));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Named nXS barrier option (e.g. from the DBnXS name table).
  StringRef Operand = Tok.getString();
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);

  if (!DB) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
                                    getContext(), true /*hasnXSModifier*/));
  Parser.Lex(); // Consume the option
3581 
/// Parse a system-register operand (for MRS/MSR and similar). SVCR names are
/// deliberately left to a different parser (NoMatch). Known registers with the
/// required features supply their readable/writeable encodings; any other
/// identifier is attempted as a generic register name. PState names are
/// resolved independently so the same operand can serve MSR-immediate forms.
OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // SVCR operands are handled elsewhere; do not claim them here.
  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
    return MatchOperand_NoMatch;

  int MRSReg, MSRReg;
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    // -1 marks a direction (read/write) this register does not support.
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  } else
    MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());

  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
  unsigned PStateImm = -1;
  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState->Encoding;

  Operands.push_back(
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
                                   PStateImm, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
3613 
/// tryParseNeonVectorRegister - Parse a vector register operand.
///
/// Returns true on failure (note: inverted sense relative to the
/// OperandMatchResultTy helpers it wraps). On success it pushes the vector
/// register, any explicit ".<kind>" qualifier as a literal token, and an
/// optional trailing "[index]".
bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  unsigned Reg;
  OperandMatchResultTy Res =
      tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  if (Res != MatchOperand_Success)
    return true;

  // Resolve the (possibly empty) kind suffix to an element width.
  const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
  if (!KindRes)
    return true;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
                                      S, getLoc(), getContext()));

  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));

  // Only an explicit index parse failure is an error; absence is fine.
  return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
}
3645 
3646 OperandMatchResultTy
3647 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
3648   SMLoc SIdx = getLoc();
3649   if (parseOptionalToken(AsmToken::LBrac)) {
3650     const MCExpr *ImmVal;
3651     if (getParser().parseExpression(ImmVal))
3652       return MatchOperand_NoMatch;
3653     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3654     if (!MCE) {
3655       TokError("immediate value expected for vector index");
3656       return MatchOperand_ParseFail;;
3657     }
3658 
3659     SMLoc E = getLoc();
3660 
3661     if (parseToken(AsmToken::RBrac, "']' expected"))
3662       return MatchOperand_ParseFail;;
3663 
3664     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3665                                                          E, getContext()));
3666     return MatchOperand_Success;
3667   }
3668 
3669   return MatchOperand_NoMatch;
3670 }
3671 
3672 // tryParseVectorRegister - Try to parse a vector register name with
3673 // optional kind specifier. If it is a register specifier, eat the token
3674 // and return it.
3675 OperandMatchResultTy
3676 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
3677                                          RegKind MatchKind) {
3678   MCAsmParser &Parser = getParser();
3679   const AsmToken &Tok = Parser.getTok();
3680 
3681   if (Tok.isNot(AsmToken::Identifier))
3682     return MatchOperand_NoMatch;
3683 
3684   StringRef Name = Tok.getString();
3685   // If there is a kind specifier, it's separated from the register name by
3686   // a '.'.
3687   size_t Start = 0, Next = Name.find('.');
3688   StringRef Head = Name.slice(Start, Next);
3689   unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3690 
3691   if (RegNum) {
3692     if (Next != StringRef::npos) {
3693       Kind = Name.slice(Next, StringRef::npos);
3694       if (!isValidVectorKind(Kind, MatchKind)) {
3695         TokError("invalid vector kind qualifier");
3696         return MatchOperand_ParseFail;
3697       }
3698     }
3699     Parser.Lex(); // Eat the register token.
3700 
3701     Reg = RegNum;
3702     return MatchOperand_Success;
3703   }
3704 
3705   return MatchOperand_NoMatch;
3706 }
3707 
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
///
/// Accepts "Pn[.<kind>]", an optional "[...]" indexed form (parsed as the next
/// operand), and an optional "/m" or "/z" predication qualifier, which is
/// pushed as two literal tokens ("/" and "m"/"z"). A size suffix and a
/// predication qualifier are mutually exclusive.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  unsigned RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
  if (Res != MatchOperand_Success)
    return Res;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // Indexed predicate, there's no comma so try parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return MatchOperand_NoMatch;
  }

  // Not all predicates are followed by a '/m' or '/z'.
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Slash))
    return MatchOperand_Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty()) {
    Error(S, "not expecting size suffix");
    return MatchOperand_ParseFail;
  }

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Parser.Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = Parser.getTok().getString().lower();
  if (Pred != "z" && Pred != "m") {
    Error(getLoc(), "expecting 'm' or 'z' predication");
    return MatchOperand_ParseFail;
  }

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Parser.Lex(); // Eat zero/merge token.
  return MatchOperand_Success;
}
3765 
3766 /// parseRegister - Parse a register operand.
3767 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3768   // Try for a Neon vector register.
3769   if (!tryParseNeonVectorRegister(Operands))
3770     return false;
3771 
3772   // Otherwise try for a scalar register.
3773   if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3774     return false;
3775 
3776   return true;
3777 }
3778 
/// Parse an immediate expression that may be prefixed by an ELF relocation
/// specifier of the form ":<specifier>:" (e.g. ":lo12:symbol"). On success
/// ImmVal holds the parsed expression, wrapped in an AArch64MCExpr carrying
/// the variant kind when a specifier was present. Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  MCAsmParser &Parser = getParser();
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifier names are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Parser.Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Attach the relocation variant kind to the parsed expression.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
3857 
/// Parse an SME matrix tile list operand of the form "{ztile.kind, ...}",
/// including the empty list "{}" and the "{za}" whole-array alias. The list is
/// lowered to a ZAD0-relative bitmask over the 64-bit tiles aliased by each
/// named tile. Duplicates and out-of-order tiles are warnings, not errors.
OperandMatchResultTy
AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Parse a single "<tile>.<kind>" element, producing its register and
  // element width. Does not consume the token unless it matches.
  auto ParseMatrixTile = [this, &Parser](unsigned &Reg,
                                         unsigned &ElementWidth) {
    StringRef Name = Parser.getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return MatchOperand_NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return MatchOperand_NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const Optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes) {
      TokError("Expected the register to be followed by element width suffix");
      return MatchOperand_ParseFail;
    }
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Parser.Lex(); // Eat the register.
    return MatchOperand_Success;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok(); // Saved so we can backtrack on NoMatch.
  Parser.Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  // Try parse {za} alias early
  if (Parser.getTok().getString().equals_insensitive("za")) {
    Parser.Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return MatchOperand_ParseFail;

    // "za" means all eight 64-bit tiles.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return MatchOperand_Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (ParseRes != MatchOperand_Success) {
    // Restore the '{' so another list-operand parser can have a go.
    Parser.getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;
  unsigned Count = 1;

  // DRegs accumulates the 64-bit tiles aliased by every listed tile.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth) {
      Error(TileLoc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
    ++Count;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  // Fold the aliased 64-bit tiles into a ZAD0-relative bitmask.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return MatchOperand_Success;
}
3971 
/// Parse a vector register list of the form "{v0.8b - v3.8b}" (range) or
/// "{v0.8b, v1.8b, ...}" (sequential list, wrapping at 31), for the register
/// kind given by the template parameter. \p ExpectMatch controls whether a
/// non-register after '{' is an error or a NoMatch (allowing other list
/// flavours, e.g. SVE vs Neon, to be tried on the same input).
template <RegKind VectorKind>
OperandMatchResultTy
AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                     bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!Parser.getTok().is(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
                                     bool NoMatchIsError) {
    auto RegTok = Parser.getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes == MatchOperand_Success) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "za"-prefixed identifiers are tolerated as NoMatch so matrix-list
    // parsing can claim them instead.
    if (RegTok.isNot(AsmToken::Identifier) ||
        ParseRes == MatchOperand_ParseFail ||
        (ParseRes == MatchOperand_NoMatch && NoMatchIsError &&
         !RegTok.getString().startswith_insensitive("za"))) {
      Error(Loc, "vector register expected");
      return MatchOperand_ParseFail;
    }

    return MatchOperand_NoMatch;
  };

  SMLoc S = getLoc();
  auto LCurly = Parser.getTok();
  Parser.Lex(); // Eat left bracket token.

  StringRef Kind;
  unsigned FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes == MatchOperand_NoMatch)
    Parser.getLexer().UnLex(LCurly);

  if (ParseRes != MatchOperand_Success)
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // Range form: "{vA.K - vB.K}".
  if (parseOptionalToken(AsmToken::Minus)) {
    SMLoc Loc = getLoc();
    StringRef NextKind;

    unsigned Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (ParseRes != MatchOperand_Success)
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind) {
      Error(Loc, "mismatched register size suffix");
      return MatchOperand_ParseFail;
    }

    // Register numbers wrap at 32 (e.g. {v30 - v1} is a 4-register range).
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      Error(Loc, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  }
  else {
    // Explicit list form: "{vA.K, vB.K, ...}".
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      unsigned Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (ParseRes != MatchOperand_Success)
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind) {
        Error(Loc, "mismatched register size suffix");
        return MatchOperand_ParseFail;
      }

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
        Error(Loc, "registers must be sequential");
        return MatchOperand_ParseFail;
      }

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return MatchOperand_ParseFail;

  if (Count > 4) {
    Error(S, "invalid number of vectors");
    return MatchOperand_ParseFail;
  }

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
4093 
4094 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4095 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4096   auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4097   if (ParseRes != MatchOperand_Success)
4098     return true;
4099 
4100   return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
4101 }
4102 
4103 OperandMatchResultTy
4104 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4105   SMLoc StartLoc = getLoc();
4106 
4107   unsigned RegNum;
4108   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4109   if (Res != MatchOperand_Success)
4110     return Res;
4111 
4112   if (!parseOptionalToken(AsmToken::Comma)) {
4113     Operands.push_back(AArch64Operand::CreateReg(
4114         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4115     return MatchOperand_Success;
4116   }
4117 
4118   parseOptionalToken(AsmToken::Hash);
4119 
4120   if (getParser().getTok().isNot(AsmToken::Integer)) {
4121     Error(getLoc(), "index must be absent or #0");
4122     return MatchOperand_ParseFail;
4123   }
4124 
4125   const MCExpr *ImmVal;
4126   if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4127       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4128     Error(getLoc(), "index must be absent or #0");
4129     return MatchOperand_ParseFail;
4130   }
4131 
4132   Operands.push_back(AArch64Operand::CreateReg(
4133       RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4134   return MatchOperand_Success;
4135 }
4136 
4137 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4138 OperandMatchResultTy
4139 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4140   SMLoc StartLoc = getLoc();
4141 
4142   unsigned RegNum;
4143   OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
4144   if (Res != MatchOperand_Success)
4145     return Res;
4146 
4147   // No shift/extend is the default.
4148   if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
4149     Operands.push_back(AArch64Operand::CreateReg(
4150         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4151     return MatchOperand_Success;
4152   }
4153 
4154   // Eat the comma
4155   getParser().Lex();
4156 
4157   // Match the shift
4158   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4159   Res = tryParseOptionalShiftExtend(ExtOpnd);
4160   if (Res != MatchOperand_Success)
4161     return Res;
4162 
4163   auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4164   Operands.push_back(AArch64Operand::CreateReg(
4165       RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4166       Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4167       Ext->hasShiftExtendAmount()));
4168 
4169   return MatchOperand_Success;
4170 }
4171 
4172 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4173   MCAsmParser &Parser = getParser();
4174 
4175   // Some SVE instructions have a decoration after the immediate, i.e.
4176   // "mul vl". We parse them here and add tokens, which must be present in the
4177   // asm string in the tablegen instruction.
4178   bool NextIsVL =
4179       Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4180   bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4181   if (!Parser.getTok().getString().equals_insensitive("mul") ||
4182       !(NextIsVL || NextIsHash))
4183     return true;
4184 
4185   Operands.push_back(
4186       AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4187   Parser.Lex(); // Eat the "mul"
4188 
4189   if (NextIsVL) {
4190     Operands.push_back(
4191         AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4192     Parser.Lex(); // Eat the "vl"
4193     return false;
4194   }
4195 
4196   if (NextIsHash) {
4197     Parser.Lex(); // Eat the #
4198     SMLoc S = getLoc();
4199 
4200     // Parse immediate operand.
4201     const MCExpr *ImmVal;
4202     if (!Parser.parseExpression(ImmVal))
4203       if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4204         Operands.push_back(AArch64Operand::CreateImm(
4205             MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4206             getContext()));
4207         return MatchOperand_Success;
4208       }
4209   }
4210 
4211   return Error(getLoc(), "expected 'vl' or '#<imm>'");
4212 }
4213 
4214 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4215   MCAsmParser &Parser = getParser();
4216   auto Tok = Parser.getTok();
4217   if (Tok.isNot(AsmToken::Identifier))
4218     return true;
4219 
4220   auto Keyword = Tok.getString();
4221   Keyword = StringSwitch<StringRef>(Keyword.lower())
4222                 .Case("sm", "sm")
4223                 .Case("za", "za")
4224                 .Default(Keyword);
4225   Operands.push_back(
4226       AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4227 
4228   Parser.Lex();
4229   return false;
4230 }
4231 
/// parseOperand - Parse an AArch64 instruction operand.  For now this parses
/// the operand regardless of the mnemonic.
///
/// \param Operands       list the parsed operand(s) are appended to.
/// \param isCondCode     true when this operand position expects a condition
///                       code, so a bare identifier is parsed as one.
/// \param invertCondCode when parsing a condition code, store its inverse
///                       (used by aliases such as cset/cinc whose underlying
///                       instruction takes the inverted condition).
/// \returns true on failure.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  // First give the tablegen'erated custom operand parsers a chance.
  OperandMatchResultTy ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Fallback: treat the operand as a symbolic immediate expression
    // (labels, relocation specifiers, ...).
    SMLoc S = getLoc(); // NOTE(review): shadows the outer 'S' declared above.
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    // Start of a memory/address operand, e.g. "[x0, #8]". The '[' becomes a
    // plain token operand.
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // Try a NEON vector list first; if that doesn't match, keep the '{' as a
    // plain token (e.g. for SME ZA tile groups) and continue parsing.
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Parser.Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // If this is an "smstart" or "smstop" instruction, parse its special
    // keyword operand as an identifier.
    if (Mnemonic == "smstart" || Mnemonic == "smstop")
      return parseKeywordOperand(Operands);

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift; // NOTE(review): implicit enum->bool conversion;
                       // Success(0) -> false and ParseFail -> true, which is
                       // the intended result, but an explicit bool would be
                       // clearer.

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as two raw tokens, matching the asm string of the
      // fcmp-style instruction definitions.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // The "ldr <reg>, =<expr>" pseudo-instruction.
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      // Normalize: shift out trailing 16-bit zero chunks so the value can be
      // encoded as movz with an LSL #16/#32/#48 modifier.
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // NOTE(review): 'S' is never assigned on this path, so these operand
        // locations are default-constructed (invalid) SMLocs — consider
        // using 'Loc' instead.
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4420 
4421 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4422   const MCExpr *Expr = nullptr;
4423   SMLoc L = getLoc();
4424   if (check(getParser().parseExpression(Expr), L, "expected expression"))
4425     return true;
4426   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4427   if (check(!Value, L, "expected constant expression"))
4428     return true;
4429   Out = Value->getValue();
4430   return false;
4431 }
4432 
4433 bool AArch64AsmParser::parseComma() {
4434   if (check(getParser().getTok().isNot(AsmToken::Comma), getLoc(),
4435             "expected comma"))
4436     return true;
4437   // Eat the comma
4438   getParser().Lex();
4439   return false;
4440 }
4441 
4442 bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
4443                                             unsigned First, unsigned Last) {
4444   unsigned Reg;
4445   SMLoc Start, End;
4446   if (check(ParseRegister(Reg, Start, End), getLoc(), "expected register"))
4447     return true;
4448 
4449   // Special handling for FP and LR; they aren't linearly after x28 in
4450   // the registers enum.
4451   unsigned RangeEnd = Last;
4452   if (Base == AArch64::X0) {
4453     if (Last == AArch64::FP) {
4454       RangeEnd = AArch64::X28;
4455       if (Reg == AArch64::FP) {
4456         Out = 29;
4457         return false;
4458       }
4459     }
4460     if (Last == AArch64::LR) {
4461       RangeEnd = AArch64::X28;
4462       if (Reg == AArch64::FP) {
4463         Out = 29;
4464         return false;
4465       } else if (Reg == AArch64::LR) {
4466         Out = 30;
4467         return false;
4468       }
4469     }
4470   }
4471 
4472   if (check(Reg < First || Reg > RangeEnd, Start,
4473             Twine("expected register in range ") +
4474                 AArch64InstPrinter::getRegisterName(First) + " to " +
4475                 AArch64InstPrinter::getRegisterName(Last)))
4476     return true;
4477   Out = Reg - Base;
4478   return false;
4479 }
4480 
4481 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
4482                                  const MCParsedAsmOperand &Op2) const {
4483   auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4484   auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4485   if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4486       AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4487     return MCTargetAsmParser::regsEqual(Op1, Op2);
4488 
4489   assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4490          "Testing equality of non-scalar registers not supported");
4491 
4492   // Check if a registers match their sub/super register classes.
4493   if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4494     return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4495   if (AOp1.getRegEqualityTy() == EqualsSubReg)
4496     return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4497   if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4498     return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4499   if (AOp2.getRegEqualityTy() == EqualsSubReg)
4500     return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4501 
4502   return false;
4503 }
4504 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. The mnemonic (split on '.') and each operand are appended to
/// \p Operands; returns true on failure, with a diagnostic emitted.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize the legacy "b<cond>" spellings to the architectural
  // "b.<cond>" form so the suffix handling below only sees one syntax.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp")
    return parseSysAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    // The text after "b." is the condition code, e.g. "b.eq". It is emitted
    // as a "." suffix token followed by a cond-code operand.
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1; // 1-based operand index, drives the cond-code checks.
    do {
      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  // Everything after the last operand must be the end of the statement.
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
4645 
4646 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
4647   assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4648   return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
4649          (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
4650          (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
4651          (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
4652          (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
4653          (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
4654 }
4655 
4656 // FIXME: This entire function is a giant hack to provide us with decent
4657 // operand range validation/diagnostics until TableGen/MC can be extended
4658 // to support autogeneration of this kind of validation.
4659 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
4660                                            SmallVectorImpl<SMLoc> &Loc) {
4661   const MCRegisterInfo *RI = getContext().getRegisterInfo();
4662   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
4663 
4664   // A prefix only applies to the instruction following it.  Here we extract
4665   // prefix information for the next instruction before validating the current
  // one so that in the case of failure we don't erroneously continue using the
4667   // current prefix.
4668   PrefixInfo Prefix = NextPrefix;
4669   NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
4670 
4671   // Before validating the instruction in isolation we run through the rules
4672   // applicable when it follows a prefix instruction.
4673   // NOTE: brk & hlt can be prefixed but require no additional validation.
4674   if (Prefix.isActive() &&
4675       (Inst.getOpcode() != AArch64::BRK) &&
4676       (Inst.getOpcode() != AArch64::HLT)) {
4677 
    // Prefixed instructions must have a destructive operand.
4679     if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
4680         AArch64::NotDestructive)
4681       return Error(IDLoc, "instruction is unpredictable when following a"
4682                    " movprfx, suggest replacing movprfx with mov");
4683 
4684     // Destination operands must match.
4685     if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
4686       return Error(Loc[0], "instruction is unpredictable when following a"
4687                    " movprfx writing to a different destination");
4688 
4689     // Destination operand must not be used in any other location.
4690     for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
4691       if (Inst.getOperand(i).isReg() &&
4692           (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
4693           isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
4694         return Error(Loc[0], "instruction is unpredictable when following a"
4695                      " movprfx and destination also used as non-destructive"
4696                      " source");
4697     }
4698 
4699     auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4700     if (Prefix.isPredicated()) {
4701       int PgIdx = -1;
4702 
      // Find the instruction's general predicate.
4704       for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
4705         if (Inst.getOperand(i).isReg() &&
4706             PPRRegClass.contains(Inst.getOperand(i).getReg())) {
4707           PgIdx = i;
4708           break;
4709         }
4710 
4711       // Instruction must be predicated if the movprfx is predicated.
4712       if (PgIdx == -1 ||
4713           (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
4714         return Error(IDLoc, "instruction is unpredictable when following a"
4715                      " predicated movprfx, suggest using unpredicated movprfx");
4716 
4717       // Instruction must use same general predicate as the movprfx.
4718       if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
4719         return Error(IDLoc, "instruction is unpredictable when following a"
4720                      " predicated movprfx using a different general predicate");
4721 
4722       // Instruction element type must match the movprfx.
4723       if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
4724         return Error(IDLoc, "instruction is unpredictable when following a"
4725                      " predicated movprfx with a different element size");
4726     }
4727   }
4728 
4729   // Check for indexed addressing modes w/ the base register being the
4730   // same as a destination/source register or pair load where
4731   // the Rt == Rt2. All of those are undefined behaviour.
4732   switch (Inst.getOpcode()) {
4733   case AArch64::LDPSWpre:
4734   case AArch64::LDPWpost:
4735   case AArch64::LDPWpre:
4736   case AArch64::LDPXpost:
4737   case AArch64::LDPXpre: {
4738     unsigned Rt = Inst.getOperand(1).getReg();
4739     unsigned Rt2 = Inst.getOperand(2).getReg();
4740     unsigned Rn = Inst.getOperand(3).getReg();
4741     if (RI->isSubRegisterEq(Rn, Rt))
4742       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
4743                            "is also a destination");
4744     if (RI->isSubRegisterEq(Rn, Rt2))
4745       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
4746                            "is also a destination");
4747     LLVM_FALLTHROUGH;
4748   }
4749   case AArch64::LDPDi:
4750   case AArch64::LDPQi:
4751   case AArch64::LDPSi:
4752   case AArch64::LDPSWi:
4753   case AArch64::LDPWi:
4754   case AArch64::LDPXi: {
4755     unsigned Rt = Inst.getOperand(0).getReg();
4756     unsigned Rt2 = Inst.getOperand(1).getReg();
4757     if (Rt == Rt2)
4758       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4759     break;
4760   }
4761   case AArch64::LDPDpost:
4762   case AArch64::LDPDpre:
4763   case AArch64::LDPQpost:
4764   case AArch64::LDPQpre:
4765   case AArch64::LDPSpost:
4766   case AArch64::LDPSpre:
4767   case AArch64::LDPSWpost: {
4768     unsigned Rt = Inst.getOperand(1).getReg();
4769     unsigned Rt2 = Inst.getOperand(2).getReg();
4770     if (Rt == Rt2)
4771       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
4772     break;
4773   }
4774   case AArch64::STPDpost:
4775   case AArch64::STPDpre:
4776   case AArch64::STPQpost:
4777   case AArch64::STPQpre:
4778   case AArch64::STPSpost:
4779   case AArch64::STPSpre:
4780   case AArch64::STPWpost:
4781   case AArch64::STPWpre:
4782   case AArch64::STPXpost:
4783   case AArch64::STPXpre: {
4784     unsigned Rt = Inst.getOperand(1).getReg();
4785     unsigned Rt2 = Inst.getOperand(2).getReg();
4786     unsigned Rn = Inst.getOperand(3).getReg();
4787     if (RI->isSubRegisterEq(Rn, Rt))
4788       return Error(Loc[0], "unpredictable STP instruction, writeback base "
4789                            "is also a source");
4790     if (RI->isSubRegisterEq(Rn, Rt2))
4791       return Error(Loc[1], "unpredictable STP instruction, writeback base "
4792                            "is also a source");
4793     break;
4794   }
4795   case AArch64::LDRBBpre:
4796   case AArch64::LDRBpre:
4797   case AArch64::LDRHHpre:
4798   case AArch64::LDRHpre:
4799   case AArch64::LDRSBWpre:
4800   case AArch64::LDRSBXpre:
4801   case AArch64::LDRSHWpre:
4802   case AArch64::LDRSHXpre:
4803   case AArch64::LDRSWpre:
4804   case AArch64::LDRWpre:
4805   case AArch64::LDRXpre:
4806   case AArch64::LDRBBpost:
4807   case AArch64::LDRBpost:
4808   case AArch64::LDRHHpost:
4809   case AArch64::LDRHpost:
4810   case AArch64::LDRSBWpost:
4811   case AArch64::LDRSBXpost:
4812   case AArch64::LDRSHWpost:
4813   case AArch64::LDRSHXpost:
4814   case AArch64::LDRSWpost:
4815   case AArch64::LDRWpost:
4816   case AArch64::LDRXpost: {
4817     unsigned Rt = Inst.getOperand(1).getReg();
4818     unsigned Rn = Inst.getOperand(2).getReg();
4819     if (RI->isSubRegisterEq(Rn, Rt))
4820       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
4821                            "is also a source");
4822     break;
4823   }
4824   case AArch64::STRBBpost:
4825   case AArch64::STRBpost:
4826   case AArch64::STRHHpost:
4827   case AArch64::STRHpost:
4828   case AArch64::STRWpost:
4829   case AArch64::STRXpost:
4830   case AArch64::STRBBpre:
4831   case AArch64::STRBpre:
4832   case AArch64::STRHHpre:
4833   case AArch64::STRHpre:
4834   case AArch64::STRWpre:
4835   case AArch64::STRXpre: {
4836     unsigned Rt = Inst.getOperand(1).getReg();
4837     unsigned Rn = Inst.getOperand(2).getReg();
4838     if (RI->isSubRegisterEq(Rn, Rt))
4839       return Error(Loc[0], "unpredictable STR instruction, writeback base "
4840                            "is also a source");
4841     break;
4842   }
4843   case AArch64::STXRB:
4844   case AArch64::STXRH:
4845   case AArch64::STXRW:
4846   case AArch64::STXRX:
4847   case AArch64::STLXRB:
4848   case AArch64::STLXRH:
4849   case AArch64::STLXRW:
4850   case AArch64::STLXRX: {
4851     unsigned Rs = Inst.getOperand(0).getReg();
4852     unsigned Rt = Inst.getOperand(1).getReg();
4853     unsigned Rn = Inst.getOperand(2).getReg();
4854     if (RI->isSubRegisterEq(Rt, Rs) ||
4855         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4856       return Error(Loc[0],
4857                    "unpredictable STXR instruction, status is also a source");
4858     break;
4859   }
4860   case AArch64::STXPW:
4861   case AArch64::STXPX:
4862   case AArch64::STLXPW:
4863   case AArch64::STLXPX: {
4864     unsigned Rs = Inst.getOperand(0).getReg();
4865     unsigned Rt1 = Inst.getOperand(1).getReg();
4866     unsigned Rt2 = Inst.getOperand(2).getReg();
4867     unsigned Rn = Inst.getOperand(3).getReg();
4868     if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
4869         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
4870       return Error(Loc[0],
4871                    "unpredictable STXP instruction, status is also a source");
4872     break;
4873   }
4874   case AArch64::LDRABwriteback:
4875   case AArch64::LDRAAwriteback: {
4876     unsigned Xt = Inst.getOperand(0).getReg();
4877     unsigned Xn = Inst.getOperand(1).getReg();
4878     if (Xt == Xn)
4879       return Error(Loc[0],
4880           "unpredictable LDRA instruction, writeback base"
4881           " is also a destination");
4882     break;
4883   }
4884   }
4885 
4886 
4887   // Now check immediate ranges. Separate from the above as there is overlap
4888   // in the instructions being checked and this keeps the nested conditionals
4889   // to a minimum.
4890   switch (Inst.getOpcode()) {
4891   case AArch64::ADDSWri:
4892   case AArch64::ADDSXri:
4893   case AArch64::ADDWri:
4894   case AArch64::ADDXri:
4895   case AArch64::SUBSWri:
4896   case AArch64::SUBSXri:
4897   case AArch64::SUBWri:
4898   case AArch64::SUBXri: {
4899     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
4900     // some slight duplication here.
4901     if (Inst.getOperand(2).isExpr()) {
4902       const MCExpr *Expr = Inst.getOperand(2).getExpr();
4903       AArch64MCExpr::VariantKind ELFRefKind;
4904       MCSymbolRefExpr::VariantKind DarwinRefKind;
4905       int64_t Addend;
4906       if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4907 
4908         // Only allow these with ADDXri.
4909         if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
4910              DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
4911             Inst.getOpcode() == AArch64::ADDXri)
4912           return false;
4913 
4914         // Only allow these with ADDXri/ADDWri
4915         if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
4916              ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
4917              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
4918              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
4919              ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
4920              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
4921              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
4922              ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
4923              ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
4924              ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4925             (Inst.getOpcode() == AArch64::ADDXri ||
4926              Inst.getOpcode() == AArch64::ADDWri))
4927           return false;
4928 
4929         // Don't allow symbol refs in the immediate field otherwise
4930         // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4931         // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4932         // 'cmp w0, 'borked')
4933         return Error(Loc.back(), "invalid immediate expression");
4934       }
4935       // We don't validate more complex expressions here
4936     }
4937     return false;
4938   }
4939   default:
4940     return false;
4941   }
4942 }
4943 
// Forward declaration: produces a spell-check suggestion string (possibly
// empty) for a misspelled mnemonic, filtered by the available feature bits.
// The result is appended to the "unrecognized instruction mnemonic"
// diagnostic emitted from showMatchError below.
static std::string AArch64MnemonicSpellCheck(StringRef S,
                                             const FeatureBitset &FBS,
                                             unsigned VariantID = 0);
4947 
// Translate an instruction-match failure code into a user-facing diagnostic
// at location Loc. For Match_InvalidTiedOperand, ErrorInfo is the index into
// Operands of the offending operand; for Match_MnemonicFail, Operands[0]
// holds the unrecognized mnemonic token. Always returns true (the Error()
// convention), so callers can simply 'return showMatchError(...)'.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      OperandVector &Operands) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    // Wording depends on which tied-register constraint failed: the operand
    // must equal the destination register exactly, or be its 64-bit
    // super-register / 32-bit sub-register form.
    RegConstraintEqualityTy EqTy =
        static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
            .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    llvm_unreachable("Unknown RegConstraintEqualityTy");
  }
  // Generic match failures.
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  // Add/sub and logical operand-form diagnostics.
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-offset diagnostics: each message states the required scale
  // ("multiple of N") and the signed/unsigned range the addressing form
  // accepts.
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed32SImm4:
    return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  // Extended-register addressing: the extend operator and shift amount
  // allowed for each access size (shift equals log2 of the access size).
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  // Unsigned scaled-offset forms.
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Plain immediate-range diagnostics.
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_3:
    return Error(Loc, "immediate must be an integer in range [0, 3].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // SVE add/sub and cpy immediates: a byte value, or (for wider elements) a
  // multiple of 256 within the element's representable range.
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  // Vector lane-index diagnostics.
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
  case Match_InvalidSVCR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    // Unknown mnemonic: append a spell-check suggestion computed against the
    // mnemonics available under the current feature set (empty if none).
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  // Scalar GPR64 base registers that require a specific 'lsl' shift.
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64shifted128:
    return Error(
        Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted128:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
  // SVE Z-register (vector) shift/extend diagnostics.
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  // SVE Z-register element-width and restricted-range diagnostics.
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  // SVE predicate-register diagnostics.
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
    return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
  case Match_InvalidSVEPredicate3bBReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.b..p7.b");
  case Match_InvalidSVEPredicate3bHReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.h..p7.h");
  case Match_InvalidSVEPredicate3bSReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.s..p7.s");
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "invalid restricted predicate register, expected p0.d..p7.d");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  // Matrix (ZA tile) operand diagnostics.
  case Match_InvalidMatrixTileVectorH8:
  case Match_InvalidMatrixTileVectorV8:
    return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
  case Match_InvalidMatrixTileVectorH16:
  case Match_InvalidMatrixTileVectorV16:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
  case Match_InvalidMatrixTileVectorH32:
  case Match_InvalidMatrixTileVectorV32:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
  case Match_InvalidMatrixTileVectorH64:
  case Match_InvalidMatrixTileVectorV64:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
  case Match_InvalidMatrixTileVectorH128:
  case Match_InvalidMatrixTileVectorV128:
    return Error(Loc,
                 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
  case Match_InvalidMatrixTile32:
    return Error(Loc, "invalid matrix operand, expected za[0-3].s");
  case Match_InvalidMatrixTile64:
    return Error(Loc, "invalid matrix operand, expected za[0-7].d");
  case Match_InvalidMatrix:
    return Error(Loc, "invalid matrix operand, expected za");
  case Match_InvalidMatrixIndexGPR32_12_15:
    return Error(Loc, "operand must be a register in range [w12, w15]");
  default:
    // Any error code not listed above indicates a matcher bug.
    llvm_unreachable("unexpected error code!");
  }
}
5304 
// Forward declaration: maps a subtarget feature bit value to its printable
// name — presumably used when reporting Match_MissingFeature diagnostics;
// confirm at the call site (below this point in the file).
static const char *getSubtargetFeatureName(uint64_t Val);
5306 
5307 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
5308                                                OperandVector &Operands,
5309                                                MCStreamer &Out,
5310                                                uint64_t &ErrorInfo,
5311                                                bool MatchingInlineAsm) {
5312   assert(!Operands.empty() && "Unexpect empty operand list!");
5313   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
5314   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
5315 
5316   StringRef Tok = Op.getToken();
5317   unsigned NumOperands = Operands.size();
5318 
5319   if (NumOperands == 4 && Tok == "lsl") {
5320     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5321     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5322     if (Op2.isScalarReg() && Op3.isImm()) {
5323       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5324       if (Op3CE) {
5325         uint64_t Op3Val = Op3CE->getValue();
5326         uint64_t NewOp3Val = 0;
5327         uint64_t NewOp4Val = 0;
5328         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
5329                 Op2.getReg())) {
5330           NewOp3Val = (32 - Op3Val) & 0x1f;
5331           NewOp4Val = 31 - Op3Val;
5332         } else {
5333           NewOp3Val = (64 - Op3Val) & 0x3f;
5334           NewOp4Val = 63 - Op3Val;
5335         }
5336 
5337         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
5338         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
5339 
5340         Operands[0] =
5341             AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
5342         Operands.push_back(AArch64Operand::CreateImm(
5343             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
5344         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
5345                                                 Op3.getEndLoc(), getContext());
5346       }
5347     }
5348   } else if (NumOperands == 4 && Tok == "bfc") {
5349     // FIXME: Horrible hack to handle BFC->BFM alias.
5350     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5351     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
5352     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
5353 
5354     if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
5355       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
5356       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
5357 
5358       if (LSBCE && WidthCE) {
5359         uint64_t LSB = LSBCE->getValue();
5360         uint64_t Width = WidthCE->getValue();
5361 
5362         uint64_t RegWidth = 0;
5363         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5364                 Op1.getReg()))
5365           RegWidth = 64;
5366         else
5367           RegWidth = 32;
5368 
5369         if (LSB >= RegWidth)
5370           return Error(LSBOp.getStartLoc(),
5371                        "expected integer in range [0, 31]");
5372         if (Width < 1 || Width > RegWidth)
5373           return Error(WidthOp.getStartLoc(),
5374                        "expected integer in range [1, 32]");
5375 
5376         uint64_t ImmR = 0;
5377         if (RegWidth == 32)
5378           ImmR = (32 - LSB) & 0x1f;
5379         else
5380           ImmR = (64 - LSB) & 0x3f;
5381 
5382         uint64_t ImmS = Width - 1;
5383 
5384         if (ImmR != 0 && ImmS >= ImmR)
5385           return Error(WidthOp.getStartLoc(),
5386                        "requested insert overflows register");
5387 
5388         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
5389         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
5390         Operands[0] =
5391             AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
5392         Operands[2] = AArch64Operand::CreateReg(
5393             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
5394             SMLoc(), SMLoc(), getContext());
5395         Operands[3] = AArch64Operand::CreateImm(
5396             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
5397         Operands.emplace_back(
5398             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
5399                                       WidthOp.getEndLoc(), getContext()));
5400       }
5401     }
5402   } else if (NumOperands == 5) {
5403     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
5404     // UBFIZ -> UBFM aliases.
5405     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
5406       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5407       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5408       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5409 
5410       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5411         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5412         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5413 
5414         if (Op3CE && Op4CE) {
5415           uint64_t Op3Val = Op3CE->getValue();
5416           uint64_t Op4Val = Op4CE->getValue();
5417 
5418           uint64_t RegWidth = 0;
5419           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5420                   Op1.getReg()))
5421             RegWidth = 64;
5422           else
5423             RegWidth = 32;
5424 
5425           if (Op3Val >= RegWidth)
5426             return Error(Op3.getStartLoc(),
5427                          "expected integer in range [0, 31]");
5428           if (Op4Val < 1 || Op4Val > RegWidth)
5429             return Error(Op4.getStartLoc(),
5430                          "expected integer in range [1, 32]");
5431 
5432           uint64_t NewOp3Val = 0;
5433           if (RegWidth == 32)
5434             NewOp3Val = (32 - Op3Val) & 0x1f;
5435           else
5436             NewOp3Val = (64 - Op3Val) & 0x3f;
5437 
5438           uint64_t NewOp4Val = Op4Val - 1;
5439 
5440           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
5441             return Error(Op4.getStartLoc(),
5442                          "requested insert overflows register");
5443 
5444           const MCExpr *NewOp3 =
5445               MCConstantExpr::create(NewOp3Val, getContext());
5446           const MCExpr *NewOp4 =
5447               MCConstantExpr::create(NewOp4Val, getContext());
5448           Operands[3] = AArch64Operand::CreateImm(
5449               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
5450           Operands[4] = AArch64Operand::CreateImm(
5451               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5452           if (Tok == "bfi")
5453             Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5454                                                       getContext());
5455           else if (Tok == "sbfiz")
5456             Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5457                                                       getContext());
5458           else if (Tok == "ubfiz")
5459             Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5460                                                       getContext());
5461           else
5462             llvm_unreachable("No valid mnemonic for alias?");
5463         }
5464       }
5465 
5466       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
5467       // UBFX -> UBFM aliases.
5468     } else if (NumOperands == 5 &&
5469                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
5470       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5471       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5472       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
5473 
5474       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
5475         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
5476         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
5477 
5478         if (Op3CE && Op4CE) {
5479           uint64_t Op3Val = Op3CE->getValue();
5480           uint64_t Op4Val = Op4CE->getValue();
5481 
5482           uint64_t RegWidth = 0;
5483           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5484                   Op1.getReg()))
5485             RegWidth = 64;
5486           else
5487             RegWidth = 32;
5488 
5489           if (Op3Val >= RegWidth)
5490             return Error(Op3.getStartLoc(),
5491                          "expected integer in range [0, 31]");
5492           if (Op4Val < 1 || Op4Val > RegWidth)
5493             return Error(Op4.getStartLoc(),
5494                          "expected integer in range [1, 32]");
5495 
5496           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
5497 
5498           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
5499             return Error(Op4.getStartLoc(),
5500                          "requested extract overflows register");
5501 
5502           const MCExpr *NewOp4 =
5503               MCConstantExpr::create(NewOp4Val, getContext());
5504           Operands[4] = AArch64Operand::CreateImm(
5505               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
5506           if (Tok == "bfxil")
5507             Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
5508                                                       getContext());
5509           else if (Tok == "sbfx")
5510             Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
5511                                                       getContext());
5512           else if (Tok == "ubfx")
5513             Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
5514                                                       getContext());
5515           else
5516             llvm_unreachable("No valid mnemonic for alias?");
5517         }
5518       }
5519     }
5520   }
5521 
5522   // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
5523   // instruction for FP registers correctly in some rare circumstances. Convert
5524   // it to a safe instruction and warn (because silently changing someone's
5525   // assembly is rude).
5526   if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
5527       NumOperands == 4 && Tok == "movi") {
5528     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
5529     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
5530     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
5531     if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
5532         (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
5533       StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
5534       if (Suffix.lower() == ".2d" &&
5535           cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
5536         Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
5537                 " correctly on this CPU, converting to equivalent movi.16b");
5538         // Switch the suffix to .16b.
5539         unsigned Idx = Op1.isToken() ? 1 : 2;
5540         Operands[Idx] =
5541             AArch64Operand::CreateToken(".16b", IDLoc, getContext());
5542       }
5543     }
5544   }
5545 
5546   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
5547   //        InstAlias can't quite handle this since the reg classes aren't
5548   //        subclasses.
5549   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
5550     // The source register can be Wn here, but the matcher expects a
5551     // GPR64. Twiddle it here if necessary.
5552     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5553     if (Op.isScalarReg()) {
5554       unsigned Reg = getXRegFromWReg(Op.getReg());
5555       Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5556                                               Op.getStartLoc(), Op.getEndLoc(),
5557                                               getContext());
5558     }
5559   }
5560   // FIXME: Likewise for sxt[bh] with a Xd dst operand
5561   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
5562     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5563     if (Op.isScalarReg() &&
5564         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5565             Op.getReg())) {
5566       // The source register can be Wn here, but the matcher expects a
5567       // GPR64. Twiddle it here if necessary.
5568       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
5569       if (Op.isScalarReg()) {
5570         unsigned Reg = getXRegFromWReg(Op.getReg());
5571         Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5572                                                 Op.getStartLoc(),
5573                                                 Op.getEndLoc(), getContext());
5574       }
5575     }
5576   }
5577   // FIXME: Likewise for uxt[bh] with a Xd dst operand
5578   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
5579     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5580     if (Op.isScalarReg() &&
5581         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5582             Op.getReg())) {
5583       // The source register can be Wn here, but the matcher expects a
5584       // GPR32. Twiddle it here if necessary.
5585       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
5586       if (Op.isScalarReg()) {
5587         unsigned Reg = getWRegFromXReg(Op.getReg());
5588         Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
5589                                                 Op.getStartLoc(),
5590                                                 Op.getEndLoc(), getContext());
5591       }
5592     }
5593   }
5594 
5595   MCInst Inst;
5596   FeatureBitset MissingFeatures;
5597   // First try to match against the secondary set of tables containing the
5598   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
5599   unsigned MatchResult =
5600       MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5601                            MatchingInlineAsm, 1);
5602 
5603   // If that fails, try against the alternate table containing long-form NEON:
5604   // "fadd v0.2s, v1.2s, v2.2s"
5605   if (MatchResult != Match_Success) {
5606     // But first, save the short-form match result: we can use it in case the
5607     // long-form match also fails.
5608     auto ShortFormNEONErrorInfo = ErrorInfo;
5609     auto ShortFormNEONMatchResult = MatchResult;
5610     auto ShortFormNEONMissingFeatures = MissingFeatures;
5611 
5612     MatchResult =
5613         MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
5614                              MatchingInlineAsm, 0);
5615 
5616     // Now, both matches failed, and the long-form match failed on the mnemonic
5617     // suffix token operand.  The short-form match failure is probably more
5618     // relevant: use it instead.
5619     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
5620         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
5621         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
5622       MatchResult = ShortFormNEONMatchResult;
5623       ErrorInfo = ShortFormNEONErrorInfo;
5624       MissingFeatures = ShortFormNEONMissingFeatures;
5625     }
5626   }
5627 
5628   switch (MatchResult) {
5629   case Match_Success: {
5630     // Perform range checking and other semantic validations
5631     SmallVector<SMLoc, 8> OperandLocs;
5632     NumOperands = Operands.size();
5633     for (unsigned i = 1; i < NumOperands; ++i)
5634       OperandLocs.push_back(Operands[i]->getStartLoc());
5635     if (validateInstruction(Inst, IDLoc, OperandLocs))
5636       return true;
5637 
5638     Inst.setLoc(IDLoc);
5639     Out.emitInstruction(Inst, getSTI());
5640     return false;
5641   }
5642   case Match_MissingFeature: {
5643     assert(MissingFeatures.any() && "Unknown missing feature!");
5644     // Special case the error message for the very common case where only
5645     // a single subtarget feature is missing (neon, e.g.).
5646     std::string Msg = "instruction requires:";
5647     for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
5648       if (MissingFeatures[i]) {
5649         Msg += " ";
5650         Msg += getSubtargetFeatureName(i);
5651       }
5652     }
5653     return Error(IDLoc, Msg);
5654   }
5655   case Match_MnemonicFail:
5656     return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
5657   case Match_InvalidOperand: {
5658     SMLoc ErrorLoc = IDLoc;
5659 
5660     if (ErrorInfo != ~0ULL) {
5661       if (ErrorInfo >= Operands.size())
5662         return Error(IDLoc, "too few operands for instruction",
5663                      SMRange(IDLoc, getTok().getLoc()));
5664 
5665       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5666       if (ErrorLoc == SMLoc())
5667         ErrorLoc = IDLoc;
5668     }
5669     // If the match failed on a suffix token operand, tweak the diagnostic
5670     // accordingly.
5671     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
5672         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
5673       MatchResult = Match_InvalidSuffix;
5674 
5675     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5676   }
5677   case Match_InvalidTiedOperand:
5678   case Match_InvalidMemoryIndexed1:
5679   case Match_InvalidMemoryIndexed2:
5680   case Match_InvalidMemoryIndexed4:
5681   case Match_InvalidMemoryIndexed8:
5682   case Match_InvalidMemoryIndexed16:
5683   case Match_InvalidCondCode:
5684   case Match_AddSubRegExtendSmall:
5685   case Match_AddSubRegExtendLarge:
5686   case Match_AddSubSecondSource:
5687   case Match_LogicalSecondSource:
5688   case Match_AddSubRegShift32:
5689   case Match_AddSubRegShift64:
5690   case Match_InvalidMovImm32Shift:
5691   case Match_InvalidMovImm64Shift:
5692   case Match_InvalidFPImm:
5693   case Match_InvalidMemoryWExtend8:
5694   case Match_InvalidMemoryWExtend16:
5695   case Match_InvalidMemoryWExtend32:
5696   case Match_InvalidMemoryWExtend64:
5697   case Match_InvalidMemoryWExtend128:
5698   case Match_InvalidMemoryXExtend8:
5699   case Match_InvalidMemoryXExtend16:
5700   case Match_InvalidMemoryXExtend32:
5701   case Match_InvalidMemoryXExtend64:
5702   case Match_InvalidMemoryXExtend128:
5703   case Match_InvalidMemoryIndexed1SImm4:
5704   case Match_InvalidMemoryIndexed2SImm4:
5705   case Match_InvalidMemoryIndexed3SImm4:
5706   case Match_InvalidMemoryIndexed4SImm4:
5707   case Match_InvalidMemoryIndexed1SImm6:
5708   case Match_InvalidMemoryIndexed16SImm4:
5709   case Match_InvalidMemoryIndexed32SImm4:
5710   case Match_InvalidMemoryIndexed4SImm7:
5711   case Match_InvalidMemoryIndexed8SImm7:
5712   case Match_InvalidMemoryIndexed16SImm7:
5713   case Match_InvalidMemoryIndexed8UImm5:
5714   case Match_InvalidMemoryIndexed4UImm5:
5715   case Match_InvalidMemoryIndexed2UImm5:
5716   case Match_InvalidMemoryIndexed1UImm6:
5717   case Match_InvalidMemoryIndexed2UImm6:
5718   case Match_InvalidMemoryIndexed4UImm6:
5719   case Match_InvalidMemoryIndexed8UImm6:
5720   case Match_InvalidMemoryIndexed16UImm6:
5721   case Match_InvalidMemoryIndexedSImm6:
5722   case Match_InvalidMemoryIndexedSImm5:
5723   case Match_InvalidMemoryIndexedSImm8:
5724   case Match_InvalidMemoryIndexedSImm9:
5725   case Match_InvalidMemoryIndexed16SImm9:
5726   case Match_InvalidMemoryIndexed8SImm10:
5727   case Match_InvalidImm0_1:
5728   case Match_InvalidImm0_3:
5729   case Match_InvalidImm0_7:
5730   case Match_InvalidImm0_15:
5731   case Match_InvalidImm0_31:
5732   case Match_InvalidImm0_63:
5733   case Match_InvalidImm0_127:
5734   case Match_InvalidImm0_255:
5735   case Match_InvalidImm0_65535:
5736   case Match_InvalidImm1_8:
5737   case Match_InvalidImm1_16:
5738   case Match_InvalidImm1_32:
5739   case Match_InvalidImm1_64:
5740   case Match_InvalidSVEAddSubImm8:
5741   case Match_InvalidSVEAddSubImm16:
5742   case Match_InvalidSVEAddSubImm32:
5743   case Match_InvalidSVEAddSubImm64:
5744   case Match_InvalidSVECpyImm8:
5745   case Match_InvalidSVECpyImm16:
5746   case Match_InvalidSVECpyImm32:
5747   case Match_InvalidSVECpyImm64:
5748   case Match_InvalidIndexRange1_1:
5749   case Match_InvalidIndexRange0_15:
5750   case Match_InvalidIndexRange0_7:
5751   case Match_InvalidIndexRange0_3:
5752   case Match_InvalidIndexRange0_1:
5753   case Match_InvalidSVEIndexRange0_63:
5754   case Match_InvalidSVEIndexRange0_31:
5755   case Match_InvalidSVEIndexRange0_15:
5756   case Match_InvalidSVEIndexRange0_7:
5757   case Match_InvalidSVEIndexRange0_3:
5758   case Match_InvalidLabel:
5759   case Match_InvalidComplexRotationEven:
5760   case Match_InvalidComplexRotationOdd:
5761   case Match_InvalidGPR64shifted8:
5762   case Match_InvalidGPR64shifted16:
5763   case Match_InvalidGPR64shifted32:
5764   case Match_InvalidGPR64shifted64:
5765   case Match_InvalidGPR64shifted128:
5766   case Match_InvalidGPR64NoXZRshifted8:
5767   case Match_InvalidGPR64NoXZRshifted16:
5768   case Match_InvalidGPR64NoXZRshifted32:
5769   case Match_InvalidGPR64NoXZRshifted64:
5770   case Match_InvalidGPR64NoXZRshifted128:
5771   case Match_InvalidZPR32UXTW8:
5772   case Match_InvalidZPR32UXTW16:
5773   case Match_InvalidZPR32UXTW32:
5774   case Match_InvalidZPR32UXTW64:
5775   case Match_InvalidZPR32SXTW8:
5776   case Match_InvalidZPR32SXTW16:
5777   case Match_InvalidZPR32SXTW32:
5778   case Match_InvalidZPR32SXTW64:
5779   case Match_InvalidZPR64UXTW8:
5780   case Match_InvalidZPR64SXTW8:
5781   case Match_InvalidZPR64UXTW16:
5782   case Match_InvalidZPR64SXTW16:
5783   case Match_InvalidZPR64UXTW32:
5784   case Match_InvalidZPR64SXTW32:
5785   case Match_InvalidZPR64UXTW64:
5786   case Match_InvalidZPR64SXTW64:
5787   case Match_InvalidZPR32LSL8:
5788   case Match_InvalidZPR32LSL16:
5789   case Match_InvalidZPR32LSL32:
5790   case Match_InvalidZPR32LSL64:
5791   case Match_InvalidZPR64LSL8:
5792   case Match_InvalidZPR64LSL16:
5793   case Match_InvalidZPR64LSL32:
5794   case Match_InvalidZPR64LSL64:
5795   case Match_InvalidZPR0:
5796   case Match_InvalidZPR8:
5797   case Match_InvalidZPR16:
5798   case Match_InvalidZPR32:
5799   case Match_InvalidZPR64:
5800   case Match_InvalidZPR128:
5801   case Match_InvalidZPR_3b8:
5802   case Match_InvalidZPR_3b16:
5803   case Match_InvalidZPR_3b32:
5804   case Match_InvalidZPR_4b16:
5805   case Match_InvalidZPR_4b32:
5806   case Match_InvalidZPR_4b64:
5807   case Match_InvalidSVEPredicateAnyReg:
5808   case Match_InvalidSVEPattern:
5809   case Match_InvalidSVEPredicateBReg:
5810   case Match_InvalidSVEPredicateHReg:
5811   case Match_InvalidSVEPredicateSReg:
5812   case Match_InvalidSVEPredicateDReg:
5813   case Match_InvalidSVEPredicate3bAnyReg:
5814   case Match_InvalidSVEPredicate3bBReg:
5815   case Match_InvalidSVEPredicate3bHReg:
5816   case Match_InvalidSVEPredicate3bSReg:
5817   case Match_InvalidSVEPredicate3bDReg:
5818   case Match_InvalidSVEExactFPImmOperandHalfOne:
5819   case Match_InvalidSVEExactFPImmOperandHalfTwo:
5820   case Match_InvalidSVEExactFPImmOperandZeroOne:
5821   case Match_InvalidMatrixTile32:
5822   case Match_InvalidMatrixTile64:
5823   case Match_InvalidMatrix:
5824   case Match_InvalidMatrixTileVectorH8:
5825   case Match_InvalidMatrixTileVectorH16:
5826   case Match_InvalidMatrixTileVectorH32:
5827   case Match_InvalidMatrixTileVectorH64:
5828   case Match_InvalidMatrixTileVectorH128:
5829   case Match_InvalidMatrixTileVectorV8:
5830   case Match_InvalidMatrixTileVectorV16:
5831   case Match_InvalidMatrixTileVectorV32:
5832   case Match_InvalidMatrixTileVectorV64:
5833   case Match_InvalidMatrixTileVectorV128:
5834   case Match_InvalidSVCR:
5835   case Match_InvalidMatrixIndexGPR32_12_15:
5836   case Match_MSR:
5837   case Match_MRS: {
5838     if (ErrorInfo >= Operands.size())
5839       return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
5840     // Any time we get here, there's nothing fancy to do. Just get the
5841     // operand SMLoc and display the diagnostic.
5842     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
5843     if (ErrorLoc == SMLoc())
5844       ErrorLoc = IDLoc;
5845     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
5846   }
5847   }
5848 
5849   llvm_unreachable("Implement any new match types added!");
5850 }
5851 
/// ParseDirective parses the AArch64-specific assembler directives.
/// Returns false when the directive was recognized — each handler reports
/// its own diagnostics and its return value is deliberately ignored here —
/// and true when the directive is unknown, so the generic AsmParser can try
/// to handle it.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // Some directives are only meaningful for a particular object container
  // (Mach-O LOH hints, COFF SEH unwind info).
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive matching is case-insensitive.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  // Directives accepted regardless of object format.
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  else if (IsMachO) {
    // Mach-O only: linker optimization hint directives.
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  } else if (IsCOFF) {
    // COFF only: ARM64 structured-exception-handling unwind directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else
      return true;
  } else
    return true;
  return false;
}
5940 
5941 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
5942                             SmallVector<StringRef, 4> &RequestedExtensions) {
5943   const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
5944   const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
5945 
5946   if (!NoCrypto && Crypto) {
5947     switch (ArchKind) {
5948     default:
5949       // Map 'generic' (and others) to sha2 and aes, because
5950       // that was the traditional meaning of crypto.
5951     case AArch64::ArchKind::ARMV8_1A:
5952     case AArch64::ArchKind::ARMV8_2A:
5953     case AArch64::ArchKind::ARMV8_3A:
5954       RequestedExtensions.push_back("sha2");
5955       RequestedExtensions.push_back("aes");
5956       break;
5957     case AArch64::ArchKind::ARMV8_4A:
5958     case AArch64::ArchKind::ARMV8_5A:
5959     case AArch64::ArchKind::ARMV8_6A:
5960     case AArch64::ArchKind::ARMV8_7A:
5961     case AArch64::ArchKind::ARMV8R:
5962       RequestedExtensions.push_back("sm4");
5963       RequestedExtensions.push_back("sha3");
5964       RequestedExtensions.push_back("sha2");
5965       RequestedExtensions.push_back("aes");
5966       break;
5967     }
5968   } else if (NoCrypto) {
5969     switch (ArchKind) {
5970     default:
5971       // Map 'generic' (and others) to sha2 and aes, because
5972       // that was the traditional meaning of crypto.
5973     case AArch64::ArchKind::ARMV8_1A:
5974     case AArch64::ArchKind::ARMV8_2A:
5975     case AArch64::ArchKind::ARMV8_3A:
5976       RequestedExtensions.push_back("nosha2");
5977       RequestedExtensions.push_back("noaes");
5978       break;
5979     case AArch64::ArchKind::ARMV8_4A:
5980     case AArch64::ArchKind::ARMV8_5A:
5981     case AArch64::ArchKind::ARMV8_6A:
5982     case AArch64::ArchKind::ARMV8_7A:
5983       RequestedExtensions.push_back("nosm4");
5984       RequestedExtensions.push_back("nosha3");
5985       RequestedExtensions.push_back("nosha2");
5986       RequestedExtensions.push_back("noaes");
5987       break;
5988     }
5989   }
5990 }
5991 
5992 /// parseDirectiveArch
5993 ///   ::= .arch token
5994 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
5995   SMLoc ArchLoc = getLoc();
5996 
5997   StringRef Arch, ExtensionString;
5998   std::tie(Arch, ExtensionString) =
5999       getParser().parseStringToEndOfStatement().trim().split('+');
6000 
6001   AArch64::ArchKind ID = AArch64::parseArch(Arch);
6002   if (ID == AArch64::ArchKind::INVALID)
6003     return Error(ArchLoc, "unknown arch name");
6004 
6005   if (parseToken(AsmToken::EndOfStatement))
6006     return true;
6007 
6008   // Get the architecture and extension features.
6009   std::vector<StringRef> AArch64Features;
6010   AArch64::getArchFeatures(ID, AArch64Features);
6011   AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
6012                                 AArch64Features);
6013 
6014   MCSubtargetInfo &STI = copySTI();
6015   std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
6016   STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
6017                          join(ArchFeatures.begin(), ArchFeatures.end(), ","));
6018 
6019   SmallVector<StringRef, 4> RequestedExtensions;
6020   if (!ExtensionString.empty())
6021     ExtensionString.split(RequestedExtensions, '+');
6022 
6023   ExpandCryptoAEK(ID, RequestedExtensions);
6024 
6025   FeatureBitset Features = STI.getFeatureBits();
6026   for (auto Name : RequestedExtensions) {
6027     bool EnableFeature = true;
6028 
6029     if (Name.startswith_insensitive("no")) {
6030       EnableFeature = false;
6031       Name = Name.substr(2);
6032     }
6033 
6034     for (const auto &Extension : ExtensionMap) {
6035       if (Extension.Name != Name)
6036         continue;
6037 
6038       if (Extension.Features.none())
6039         report_fatal_error("unsupported architectural extension: " + Name);
6040 
6041       FeatureBitset ToggleFeatures = EnableFeature
6042                                          ? (~Features & Extension.Features)
6043                                          : ( Features & Extension.Features);
6044       FeatureBitset Features =
6045           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6046       setAvailableFeatures(Features);
6047       break;
6048     }
6049   }
6050   return false;
6051 }
6052 
6053 /// parseDirectiveArchExtension
6054 ///   ::= .arch_extension [no]feature
6055 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
6056   SMLoc ExtLoc = getLoc();
6057 
6058   StringRef Name = getParser().parseStringToEndOfStatement().trim();
6059 
6060   if (parseToken(AsmToken::EndOfStatement,
6061                  "unexpected token in '.arch_extension' directive"))
6062     return true;
6063 
6064   bool EnableFeature = true;
6065   if (Name.startswith_insensitive("no")) {
6066     EnableFeature = false;
6067     Name = Name.substr(2);
6068   }
6069 
6070   MCSubtargetInfo &STI = copySTI();
6071   FeatureBitset Features = STI.getFeatureBits();
6072   for (const auto &Extension : ExtensionMap) {
6073     if (Extension.Name != Name)
6074       continue;
6075 
6076     if (Extension.Features.none())
6077       return Error(ExtLoc, "unsupported architectural extension: " + Name);
6078 
6079     FeatureBitset ToggleFeatures = EnableFeature
6080                                        ? (~Features & Extension.Features)
6081                                        : (Features & Extension.Features);
6082     FeatureBitset Features =
6083         ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6084     setAvailableFeatures(Features);
6085     return false;
6086   }
6087 
6088   return Error(ExtLoc, "unknown architectural extension: " + Name);
6089 }
6090 
6091 static SMLoc incrementLoc(SMLoc L, int Offset) {
6092   return SMLoc::getFromPointer(L.getPointer() + Offset);
6093 }
6094 
6095 /// parseDirectiveCPU
6096 ///   ::= .cpu id
6097 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
6098   SMLoc CurLoc = getLoc();
6099 
6100   StringRef CPU, ExtensionString;
6101   std::tie(CPU, ExtensionString) =
6102       getParser().parseStringToEndOfStatement().trim().split('+');
6103 
6104   if (parseToken(AsmToken::EndOfStatement))
6105     return true;
6106 
6107   SmallVector<StringRef, 4> RequestedExtensions;
6108   if (!ExtensionString.empty())
6109     ExtensionString.split(RequestedExtensions, '+');
6110 
6111   // FIXME This is using tablegen data, but should be moved to ARMTargetParser
6112   // once that is tablegen'ed
6113   if (!getSTI().isCPUStringValid(CPU)) {
6114     Error(CurLoc, "unknown CPU name");
6115     return false;
6116   }
6117 
6118   MCSubtargetInfo &STI = copySTI();
6119   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
6120   CurLoc = incrementLoc(CurLoc, CPU.size());
6121 
6122   ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
6123 
6124   FeatureBitset Features = STI.getFeatureBits();
6125   for (auto Name : RequestedExtensions) {
6126     // Advance source location past '+'.
6127     CurLoc = incrementLoc(CurLoc, 1);
6128 
6129     bool EnableFeature = true;
6130 
6131     if (Name.startswith_insensitive("no")) {
6132       EnableFeature = false;
6133       Name = Name.substr(2);
6134     }
6135 
6136     bool FoundExtension = false;
6137     for (const auto &Extension : ExtensionMap) {
6138       if (Extension.Name != Name)
6139         continue;
6140 
6141       if (Extension.Features.none())
6142         report_fatal_error("unsupported architectural extension: " + Name);
6143 
6144       FeatureBitset ToggleFeatures = EnableFeature
6145                                          ? (~Features & Extension.Features)
6146                                          : ( Features & Extension.Features);
6147       FeatureBitset Features =
6148           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
6149       setAvailableFeatures(Features);
6150       FoundExtension = true;
6151 
6152       break;
6153     }
6154 
6155     if (!FoundExtension)
6156       Error(CurLoc, "unsupported architectural extension");
6157 
6158     CurLoc = incrementLoc(CurLoc, Name.size());
6159   }
6160   return false;
6161 }
6162 
6163 /// parseDirectiveInst
6164 ///  ::= .inst opcode [, ...]
6165 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
6166   if (getLexer().is(AsmToken::EndOfStatement))
6167     return Error(Loc, "expected expression following '.inst' directive");
6168 
6169   auto parseOp = [&]() -> bool {
6170     SMLoc L = getLoc();
6171     const MCExpr *Expr = nullptr;
6172     if (check(getParser().parseExpression(Expr), L, "expected expression"))
6173       return true;
6174     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6175     if (check(!Value, L, "expected constant expression"))
6176       return true;
6177     getTargetStreamer().emitInst(Value->getValue());
6178     return false;
6179   };
6180 
6181   return parseMany(parseOp);
6182 }
6183 
6184 // parseDirectiveTLSDescCall:
6185 //   ::= .tlsdesccall symbol
6186 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
6187   StringRef Name;
6188   if (check(getParser().parseIdentifier(Name), L,
6189             "expected symbol after directive") ||
6190       parseToken(AsmToken::EndOfStatement))
6191     return true;
6192 
6193   MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
6194   const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
6195   Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
6196 
6197   MCInst Inst;
6198   Inst.setOpcode(AArch64::TLSDESCCALL);
6199   Inst.addOperand(MCOperand::createExpr(Expr));
6200 
6201   getParser().getStreamer().emitInstruction(Inst, getSTI());
6202   return false;
6203 }
6204 
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
/// Parses a Mach-O linker optimization hint directive: the LOH kind is
/// given either by name or by numeric id, followed by exactly the number
/// of comma-separated label arguments that kind requires.  Returns true on
/// error, false after emitting the LOH via the streamer.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  MCLOHType Kind;
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    // NOTE(review): `Id <= -1U` is a mixed signed/unsigned comparison that
    // evaluates as `Id <= 0xFFFFFFFF`, so ids above UINT32_MAX skip the
    // validity check entirely — presumably the intent was a range guard
    // before the cast below; confirm against isValidMCLOHType's contract.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
  } else {
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);

    if (Id == -1)
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  }
  // Consume the identifier.
  Lex();
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);

  assert(NbArgs != -1 && "Invalid number of arguments");

  // Parse exactly NbArgs comma-separated label names.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    StringRef Name;
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));

    // No trailing comma after the last argument.
    if (Idx + 1 == NbArgs)
      break;
    if (parseToken(AsmToken::Comma,
                   "unexpected token in '" + Twine(IDVal) + "' directive"))
      return true;
  }
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '" + Twine(IDVal) + "' directive"))
    return true;

  getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
  return false;
}
6255 
6256 /// parseDirectiveLtorg
6257 ///  ::= .ltorg | .pool
6258 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
6259   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
6260     return true;
6261   getTargetStreamer().emitCurrentConstantPool();
6262   return false;
6263 }
6264 
6265 /// parseDirectiveReq
6266 ///  ::= name .req registername
6267 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6268   MCAsmParser &Parser = getParser();
6269   Parser.Lex(); // Eat the '.req' token.
6270   SMLoc SRegLoc = getLoc();
6271   RegKind RegisterKind = RegKind::Scalar;
6272   unsigned RegNum;
6273   OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
6274 
6275   if (ParseRes != MatchOperand_Success) {
6276     StringRef Kind;
6277     RegisterKind = RegKind::NeonVector;
6278     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
6279 
6280     if (ParseRes == MatchOperand_ParseFail)
6281       return true;
6282 
6283     if (ParseRes == MatchOperand_Success && !Kind.empty())
6284       return Error(SRegLoc, "vector register without type specifier expected");
6285   }
6286 
6287   if (ParseRes != MatchOperand_Success) {
6288     StringRef Kind;
6289     RegisterKind = RegKind::SVEDataVector;
6290     ParseRes =
6291         tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
6292 
6293     if (ParseRes == MatchOperand_ParseFail)
6294       return true;
6295 
6296     if (ParseRes == MatchOperand_Success && !Kind.empty())
6297       return Error(SRegLoc,
6298                    "sve vector register without type specifier expected");
6299   }
6300 
6301   if (ParseRes != MatchOperand_Success) {
6302     StringRef Kind;
6303     RegisterKind = RegKind::SVEPredicateVector;
6304     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
6305 
6306     if (ParseRes == MatchOperand_ParseFail)
6307       return true;
6308 
6309     if (ParseRes == MatchOperand_Success && !Kind.empty())
6310       return Error(SRegLoc,
6311                    "sve predicate register without type specifier expected");
6312   }
6313 
6314   if (ParseRes != MatchOperand_Success)
6315     return Error(SRegLoc, "register name or alias expected");
6316 
6317   // Shouldn't be anything else.
6318   if (parseToken(AsmToken::EndOfStatement,
6319                  "unexpected input in .req directive"))
6320     return true;
6321 
6322   auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
6323   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
6324     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
6325 
6326   return false;
6327 }
6328 
6329 /// parseDirectiveUneq
6330 ///  ::= .unreq registername
6331 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
6332   MCAsmParser &Parser = getParser();
6333   if (getTok().isNot(AsmToken::Identifier))
6334     return TokError("unexpected input in .unreq directive.");
6335   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
6336   Parser.Lex(); // Eat the identifier.
6337   return parseToken(AsmToken::EndOfStatement);
6338 }
6339 
6340 bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
6341   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
6342     return true;
6343   getStreamer().emitCFINegateRAState();
6344   return false;
6345 }
6346 
6347 /// parseDirectiveCFIBKeyFrame
6348 /// ::= .cfi_b_key
6349 bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
6350   if (parseToken(AsmToken::EndOfStatement,
6351                  "unexpected token in '.cfi_b_key_frame'"))
6352     return true;
6353   getStreamer().emitCFIBKeyFrame();
6354   return false;
6355 }
6356 
6357 /// parseDirectiveVariantPCS
6358 /// ::= .variant_pcs symbolname
6359 bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
6360   MCAsmParser &Parser = getParser();
6361 
6362   const AsmToken &Tok = Parser.getTok();
6363   if (Tok.isNot(AsmToken::Identifier))
6364     return TokError("expected symbol name");
6365 
6366   StringRef SymbolName = Tok.getIdentifier();
6367 
6368   MCSymbol *Sym = getContext().lookupSymbol(SymbolName);
6369   if (!Sym)
6370     return TokError("unknown symbol");
6371 
6372   Parser.Lex(); // Eat the symbol
6373 
6374   if (parseEOL())
6375     return true;
6376   getTargetStreamer().emitDirectiveVariantPCS(Sym);
6377   return false;
6378 }
6379 
6380 /// parseDirectiveSEHAllocStack
6381 /// ::= .seh_stackalloc
6382 bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
6383   int64_t Size;
6384   if (parseImmExpr(Size))
6385     return true;
6386   getTargetStreamer().emitARM64WinCFIAllocStack(Size);
6387   return false;
6388 }
6389 
6390 /// parseDirectiveSEHPrologEnd
6391 /// ::= .seh_endprologue
6392 bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
6393   getTargetStreamer().emitARM64WinCFIPrologEnd();
6394   return false;
6395 }
6396 
6397 /// parseDirectiveSEHSaveR19R20X
6398 /// ::= .seh_save_r19r20_x
6399 bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
6400   int64_t Offset;
6401   if (parseImmExpr(Offset))
6402     return true;
6403   getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
6404   return false;
6405 }
6406 
6407 /// parseDirectiveSEHSaveFPLR
6408 /// ::= .seh_save_fplr
6409 bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
6410   int64_t Offset;
6411   if (parseImmExpr(Offset))
6412     return true;
6413   getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
6414   return false;
6415 }
6416 
6417 /// parseDirectiveSEHSaveFPLRX
6418 /// ::= .seh_save_fplr_x
6419 bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
6420   int64_t Offset;
6421   if (parseImmExpr(Offset))
6422     return true;
6423   getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
6424   return false;
6425 }
6426 
6427 /// parseDirectiveSEHSaveReg
6428 /// ::= .seh_save_reg
6429 bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
6430   unsigned Reg;
6431   int64_t Offset;
6432   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
6433       parseComma() || parseImmExpr(Offset))
6434     return true;
6435   getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
6436   return false;
6437 }
6438 
6439 /// parseDirectiveSEHSaveRegX
6440 /// ::= .seh_save_reg_x
6441 bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
6442   unsigned Reg;
6443   int64_t Offset;
6444   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
6445       parseComma() || parseImmExpr(Offset))
6446     return true;
6447   getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
6448   return false;
6449 }
6450 
6451 /// parseDirectiveSEHSaveRegP
6452 /// ::= .seh_save_regp
6453 bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
6454   unsigned Reg;
6455   int64_t Offset;
6456   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
6457       parseComma() || parseImmExpr(Offset))
6458     return true;
6459   getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
6460   return false;
6461 }
6462 
6463 /// parseDirectiveSEHSaveRegPX
6464 /// ::= .seh_save_regp_x
6465 bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
6466   unsigned Reg;
6467   int64_t Offset;
6468   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
6469       parseComma() || parseImmExpr(Offset))
6470     return true;
6471   getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
6472   return false;
6473 }
6474 
6475 /// parseDirectiveSEHSaveLRPair
6476 /// ::= .seh_save_lrpair
6477 bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
6478   unsigned Reg;
6479   int64_t Offset;
6480   L = getLoc();
6481   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
6482       parseComma() || parseImmExpr(Offset))
6483     return true;
6484   if (check(((Reg - 19) % 2 != 0), L,
6485             "expected register with even offset from x19"))
6486     return true;
6487   getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
6488   return false;
6489 }
6490 
6491 /// parseDirectiveSEHSaveFReg
6492 /// ::= .seh_save_freg
6493 bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
6494   unsigned Reg;
6495   int64_t Offset;
6496   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
6497       parseComma() || parseImmExpr(Offset))
6498     return true;
6499   getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
6500   return false;
6501 }
6502 
6503 /// parseDirectiveSEHSaveFRegX
6504 /// ::= .seh_save_freg_x
6505 bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
6506   unsigned Reg;
6507   int64_t Offset;
6508   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
6509       parseComma() || parseImmExpr(Offset))
6510     return true;
6511   getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
6512   return false;
6513 }
6514 
6515 /// parseDirectiveSEHSaveFRegP
6516 /// ::= .seh_save_fregp
6517 bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
6518   unsigned Reg;
6519   int64_t Offset;
6520   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
6521       parseComma() || parseImmExpr(Offset))
6522     return true;
6523   getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
6524   return false;
6525 }
6526 
6527 /// parseDirectiveSEHSaveFRegPX
6528 /// ::= .seh_save_fregp_x
6529 bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
6530   unsigned Reg;
6531   int64_t Offset;
6532   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
6533       parseComma() || parseImmExpr(Offset))
6534     return true;
6535   getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
6536   return false;
6537 }
6538 
6539 /// parseDirectiveSEHSetFP
6540 /// ::= .seh_set_fp
6541 bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
6542   getTargetStreamer().emitARM64WinCFISetFP();
6543   return false;
6544 }
6545 
6546 /// parseDirectiveSEHAddFP
6547 /// ::= .seh_add_fp
6548 bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
6549   int64_t Size;
6550   if (parseImmExpr(Size))
6551     return true;
6552   getTargetStreamer().emitARM64WinCFIAddFP(Size);
6553   return false;
6554 }
6555 
6556 /// parseDirectiveSEHNop
6557 /// ::= .seh_nop
6558 bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
6559   getTargetStreamer().emitARM64WinCFINop();
6560   return false;
6561 }
6562 
6563 /// parseDirectiveSEHSaveNext
6564 /// ::= .seh_save_next
6565 bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
6566   getTargetStreamer().emitARM64WinCFISaveNext();
6567   return false;
6568 }
6569 
6570 /// parseDirectiveSEHEpilogStart
6571 /// ::= .seh_startepilogue
6572 bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
6573   getTargetStreamer().emitARM64WinCFIEpilogStart();
6574   return false;
6575 }
6576 
6577 /// parseDirectiveSEHEpilogEnd
6578 /// ::= .seh_endepilogue
6579 bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
6580   getTargetStreamer().emitARM64WinCFIEpilogEnd();
6581   return false;
6582 }
6583 
6584 /// parseDirectiveSEHTrapFrame
6585 /// ::= .seh_trap_frame
6586 bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
6587   getTargetStreamer().emitARM64WinCFITrapFrame();
6588   return false;
6589 }
6590 
6591 /// parseDirectiveSEHMachineFrame
6592 /// ::= .seh_pushframe
6593 bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
6594   getTargetStreamer().emitARM64WinCFIMachineFrame();
6595   return false;
6596 }
6597 
6598 /// parseDirectiveSEHContext
6599 /// ::= .seh_context
6600 bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
6601   getTargetStreamer().emitARM64WinCFIContext();
6602   return false;
6603 }
6604 
6605 /// parseDirectiveSEHClearUnwoundToCall
6606 /// ::= .seh_clear_unwound_to_call
6607 bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
6608   getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
6609   return false;
6610 }
6611 
/// Decompose \p Expr into a symbol reference plus optional modifiers.
///
/// On return, \p ELFRefKind holds any AArch64-specific modifier wrapped
/// around the expression (VK_INVALID if none), \p DarwinRefKind any
/// MCSymbolRefExpr variant, and \p Addend the constant addend (0 if none).
/// Returns true iff the expression is a recognizable symbol(+addend) form
/// that does not mix ELF and Darwin modifier syntax.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off an outer AArch64-specific modifier (e.g. :lo12:) and keep
  // classifying its subexpression.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
6653 
6654 /// Force static initialization.
6655 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
6656   RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
6657   RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
6658   RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
6659   RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
6660   RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
6661 }
6662 
6663 #define GET_REGISTER_MATCHER
6664 #define GET_SUBTARGET_FEATURE_NAME
6665 #define GET_MATCHER_IMPLEMENTATION
6666 #define GET_MNEMONIC_SPELL_CHECKER
6667 #include "AArch64GenAsmMatcher.inc"
6668 
6669 // Define this matcher function after the auto-generated include so we
6670 // have the match class enum definitions.
6671 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
6672                                                       unsigned Kind) {
6673   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
6674   // If the kind is a token for a literal immediate, check if our asm
6675   // operand matches. This is for InstAliases which have a fixed-value
6676   // immediate in the syntax.
6677   int64_t ExpectedVal;
6678   switch (Kind) {
6679   default:
6680     return Match_InvalidOperand;
6681   case MCK__HASH_0:
6682     ExpectedVal = 0;
6683     break;
6684   case MCK__HASH_1:
6685     ExpectedVal = 1;
6686     break;
6687   case MCK__HASH_12:
6688     ExpectedVal = 12;
6689     break;
6690   case MCK__HASH_16:
6691     ExpectedVal = 16;
6692     break;
6693   case MCK__HASH_2:
6694     ExpectedVal = 2;
6695     break;
6696   case MCK__HASH_24:
6697     ExpectedVal = 24;
6698     break;
6699   case MCK__HASH_3:
6700     ExpectedVal = 3;
6701     break;
6702   case MCK__HASH_32:
6703     ExpectedVal = 32;
6704     break;
6705   case MCK__HASH_4:
6706     ExpectedVal = 4;
6707     break;
6708   case MCK__HASH_48:
6709     ExpectedVal = 48;
6710     break;
6711   case MCK__HASH_6:
6712     ExpectedVal = 6;
6713     break;
6714   case MCK__HASH_64:
6715     ExpectedVal = 64;
6716     break;
6717   case MCK__HASH_8:
6718     ExpectedVal = 8;
6719     break;
6720   case MCK_MPR:
6721     // If the Kind is a token for the MPR register class which has the "za"
6722     // register (SME accumulator array), check if the asm is a literal "za"
6723     // token. This is for the "smstart za" alias that defines the register
6724     // as a literal token.
6725     if (Op.isTokenEqual("za"))
6726       return Match_Success;
6727     break;
6728   }
6729   if (!Op.isImm())
6730     return Match_InvalidOperand;
6731   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6732   if (!CE)
6733     return Match_InvalidOperand;
6734   if (CE->getValue() == ExpectedVal)
6735     return Match_Success;
6736   return Match_InvalidOperand;
6737 }
6738 
/// Parse a sequential even/odd GPR pair operand: two comma-separated
/// same-size scalar registers (both X or both W) where the first has an
/// even encoding and the second is the next consecutive register. On
/// success, pushes the matching sequential-pair super-register.
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg;
  OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // The first register determines the size of the pair; the second must
  // come from the same class.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register's hardware encoding must be even.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(getLoc(), "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  SMLoc E = getLoc();
  unsigned SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (Res != MatchOperand_Success)
    return MatchOperand_ParseFail;

  // The second register must be the odd register immediately following the
  // first, in the same register class.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Map the even register to the super-register that covers the pair.
  unsigned Pair = 0;
  if (isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
}
6811 
/// Parse an SVE data vector register operand.
///
/// \tparam ParseSuffix      when true, the element-type suffix is required.
/// \tparam ParseShiftExtend when true, an optional ", <shift/extend>" may
///                          follow the register.
/// Without a shift/extend, an optional vector index ("[imm]") is also
/// accepted.
template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  unsigned RegNum;
  StringRef Kind;

  OperandMatchResultTy Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);

  if (Res != MatchOperand_Success)
    return Res;

  // When a suffix is mandated, a bare register is not a match.
  if (ParseSuffix && Kind.empty())
    return MatchOperand_NoMatch;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return MatchOperand_NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    // Note: this Res intentionally shadows the outer one; only the
    // ParseFail outcome matters here (NoMatch means "no index", which is
    // still a successful parse of the register).
    OperandMatchResultTy Res = tryParseVectorIndex(Operands);
    if (Res == MatchOperand_ParseFail)
      return MatchOperand_ParseFail;
    return MatchOperand_Success;
  }

  // Eat the comma
  getParser().Lex();

  // Match the shift
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (Res != MatchOperand_Success)
    return Res;

  // Fold the parsed shift/extend into the vector register operand itself.
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return MatchOperand_Success;
}
6863 
/// Parse an SVE predicate pattern operand: either an immediate of the form
/// '#<imm>' or a named pattern looked up in the SVEPREDPAT table. The
/// pattern encoding is pushed as a constant-immediate operand.
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  SMLoc SS = getLoc();
  const AsmToken &TokE = Parser.getTok();
  bool IsHash = TokE.is(AsmToken::Hash);

  // A pattern is either '#imm' or a bare identifier; anything else is not
  // this operand.
  if (!IsHash && TokE.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int64_t Pattern;
  if (IsHash) {
    Parser.Lex(); // Eat hash

    // Parse the immediate operand.
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only plain constants are accepted after '#'.
    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return MatchOperand_ParseFail;

    Pattern = MCE->getValue();
  } else {
    // Parse the pattern
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
    if (!Pat)
      return MatchOperand_NoMatch;

    Parser.Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));

  return MatchOperand_Success;
}
6907 
/// Parse a GPR64x8 tuple operand: a single scalar x-register that names the
/// first element of an eight-register tuple (mapped via the x8sub_0
/// subregister index). The error message documents the accepted range.
OperandMatchResultTy
AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
  SMLoc SS = getLoc();

  unsigned XReg;
  if (tryParseScalarRegister(XReg) != MatchOperand_Success)
    return MatchOperand_NoMatch;

  // Only registers that start a GPR64x8 tuple have a matching super-reg;
  // getMatchingSuperReg returns 0 otherwise.
  MCContext &ctx = getContext();
  const MCRegisterInfo *RI = ctx.getRegisterInfo();
  int X8Reg = RI->getMatchingSuperReg(
      XReg, AArch64::x8sub_0,
      &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
  if (!X8Reg) {
    Error(SS, "expected an even-numbered x-register in the range [x0,x22]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
  return MatchOperand_Success;
}
6930