1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64InstrInfo.h"
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64InstPrinter.h"
12 #include "MCTargetDesc/AArch64MCExpr.h"
13 #include "MCTargetDesc/AArch64MCTargetDesc.h"
14 #include "MCTargetDesc/AArch64TargetStreamer.h"
15 #include "TargetInfo/AArch64TargetInfo.h"
16 #include "Utils/AArch64BaseInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/StringMap.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCLinkerOptimizationHint.h"
32 #include "llvm/MC/MCObjectFileInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
37 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
38 #include "llvm/MC/MCRegisterInfo.h"
39 #include "llvm/MC/MCStreamer.h"
40 #include "llvm/MC/MCSubtargetInfo.h"
41 #include "llvm/MC/MCSymbol.h"
42 #include "llvm/MC/MCTargetOptions.h"
43 #include "llvm/MC/MCValue.h"
44 #include "llvm/MC/TargetRegistry.h"
45 #include "llvm/Support/Casting.h"
46 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/ErrorHandling.h"
48 #include "llvm/Support/MathExtras.h"
49 #include "llvm/Support/SMLoc.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include "llvm/TargetParser/AArch64TargetParser.h"
52 #include "llvm/TargetParser/SubtargetFeature.h"
53 #include <cassert>
54 #include <cctype>
55 #include <cstdint>
56 #include <cstdio>
57 #include <optional>
58 #include <string>
59 #include <tuple>
60 #include <utility>
61 #include <vector>
62 
63 using namespace llvm;
64 
65 namespace {
66 
// The distinct AArch64 register namespaces an operand can name; used to
// disambiguate parsing and .req aliases.
enum class RegKind {
  Scalar,                // General-purpose or scalar FP/SIMD register.
  NeonVector,            // V register used as a NEON vector.
  SVEDataVector,         // Z register (SVE data vector).
  SVEPredicateAsCounter, // SVE predicate register used in counter form.
  SVEPredicateVector,    // P register (SVE predicate vector).
  Matrix,                // Matrix (ZA) register operand.
  LookupTable            // Lookup-table (ZT) register operand.
};
76 
// Flavours of matrix register operand: the whole array, a tile, or a
// single row/column slice of a tile.
enum class MatrixKind { Array, Tile, Row, Col };
78 
// How a parsed register relates to the register class the matcher expects:
// exactly that register, or its super-/sub-register (e.g. the GPR64as32 /
// GPR32as64 cases mentioned on RegOp::EqualityTy).
enum RegConstraintEqualityTy {
  EqualsReg,      // Must be exactly the expected register.
  EqualsSuperReg, // Written register is the super-register of the expected one.
  EqualsSubReg    // Written register is the sub-register of the expected one.
};
84 
/// AArch64AsmParser - Target assembly parser for AArch64. Parses
/// instructions, operands and target-specific directives, and feeds matched
/// MCInsts to the streamer. Instantiated once per subtarget by the
/// TargetRegistry glue.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registered via the .req directive.
  StringMap<std::pair<RegKind, unsigned>> RegisterReqs;

  // Records a pending SVE MOVPRFX instruction so that the instruction that
  // follows it can be validated against the prefix's destination register,
  // governing predicate and element size (see validateInstruction).
  class PrefixInfo {
  public:
    // Build a PrefixInfo describing Inst if it is one of the MOVPRFX
    // opcodes; for any other opcode the returned prefix is inactive.
    static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
      PrefixInfo Prefix;
      switch (Inst.getOpcode()) {
      case AArch64::MOVPRFX_ZZ:
        // Unpredicated prefix: only the destination is recorded.
        Prefix.Active = true;
        Prefix.Dst = Inst.getOperand(0).getReg();
        break;
      case AArch64::MOVPRFX_ZPmZ_B:
      case AArch64::MOVPRFX_ZPmZ_H:
      case AArch64::MOVPRFX_ZPmZ_S:
      case AArch64::MOVPRFX_ZPmZ_D:
        // Merging predicated prefix: predicate is operand 2.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(2).getReg();
        break;
      case AArch64::MOVPRFX_ZPzZ_B:
      case AArch64::MOVPRFX_ZPzZ_H:
      case AArch64::MOVPRFX_ZPzZ_S:
      case AArch64::MOVPRFX_ZPzZ_D:
        // Zeroing predicated prefix: predicate is operand 1.
        Prefix.Active = true;
        Prefix.Predicated = true;
        Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
        assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
               "No destructive element size set for movprfx");
        Prefix.Dst = Inst.getOperand(0).getReg();
        Prefix.Pg = Inst.getOperand(1).getReg();
        break;
      default:
        break;
      }

      return Prefix;
    }

    PrefixInfo() = default;
    bool isActive() const { return Active; }
    bool isPredicated() const { return Predicated; }
    unsigned getElementSize() const {
      assert(Predicated);
      return ElementSize;
    }
    unsigned getDstReg() const { return Dst; }
    unsigned getPgReg() const {
      assert(Predicated);
      return Pg;
    }

  private:
    bool Active = false;
    bool Predicated = false;
    // The three fields below are only meaningful when Active (and, for
    // ElementSize/Pg, Predicated) is set; the accessors assert this.
    unsigned ElementSize;
    unsigned Dst;
    unsigned Pg;
  } NextPrefix;

  // The streamer is created in the constructor if the MCStreamer does not
  // already have a target streamer attached, so this cast is always valid.
  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  // Location of the current lexer token, for diagnostics.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  // Instruction/operand parsing helpers.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond,
                                          std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseNeonVectorList(OperandVector &Operands);
  bool parseOptionalMulOperand(OperandVector &Operands);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
  bool parseKeywordOperand(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseComma();
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
                            unsigned Last);

  // Emit a diagnostic for a failed match; ErrCode comes from the
  // auto-generated matcher.
  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
                      OperandVector &Operands);

  // Target directive handlers (.arch, .cpu, .inst, ...).
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();

  bool parseDirectiveVariantPCS(SMLoc L);

  // Windows SEH unwind directive handlers (.seh_*).
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);

  // Post-match semantic checks (e.g. MOVPRFX pairing) before emission.
  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced from the generated matcher tables.
  ParseStatus tryParseScalarRegister(MCRegister &Reg);
  ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
                                     RegKind MatchKind);
  ParseStatus tryParseMatrixRegister(OperandVector &Operands);
  ParseStatus tryParseSVCR(OperandVector &Operands);
  ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
  ParseStatus tryParseBarrierOperand(OperandVector &Operands);
  ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
  ParseStatus tryParseSysReg(OperandVector &Operands);
  ParseStatus tryParseSysCROperand(OperandVector &Operands);
  template <bool IsSVEPrefetch = false>
  ParseStatus tryParsePrefetch(OperandVector &Operands);
  ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
  ParseStatus tryParsePSBHint(OperandVector &Operands);
  ParseStatus tryParseBTIHint(OperandVector &Operands);
  ParseStatus tryParseAdrpLabel(OperandVector &Operands);
  ParseStatus tryParseAdrLabel(OperandVector &Operands);
  template <bool AddFPZeroAsLiteral>
  ParseStatus tryParseFPImm(OperandVector &Operands);
  ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
  ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseNeonVectorRegister(OperandVector &Operands);
  ParseStatus tryParseVectorIndex(OperandVector &Operands);
  ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
  ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  ParseStatus tryParseGPROperand(OperandVector &Operands);
  ParseStatus tryParseZTOperand(OperandVector &Operands);
  template <bool ParseShiftExtend, bool ParseSuffix>
  ParseStatus tryParseSVEDataVector(OperandVector &Operands);
  template <RegKind RK>
  ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
  template <RegKind VectorKind>
  ParseStatus tryParseVectorList(OperandVector &Operands,
                                 bool ExpectMatch = false);
  ParseStatus tryParseMatrixTileList(OperandVector &Operands);
  ParseStatus tryParseSVEPattern(OperandVector &Operands);
  ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
  ParseStatus tryParseGPR64x8(OperandVector &Operands);
  ParseStatus tryParseImmRange(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  // True when targeting the ILP32 (GNUILP32) environment; set from the
  // subtarget triple in the constructor.
  bool IsILP32;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII) {
    IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists; getTargetStreamer() relies on this.
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Alias .hword/.word/.[dx]word to the target-independent
    // .2byte/.4byte/.8byte directives as they have the same form and
    // semantics:
    ///  ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    Parser.addAliasForDirective(".xword", ".8byte");

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
                     SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(MCRegister &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  // Decompose a symbolic expression into its relocation specifier
  // (ELF or Darwin flavour) and constant addend. Returns false if the
  // expression is not of a recognized shape.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
332 
333 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
334 /// instruction.
335 class AArch64Operand : public MCParsedAsmOperand {
private:
  // Discriminator for the anonymous union below; exactly one union member
  // is valid at a time, selected by this tag.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_ImmRange,
    k_CondCode,
    k_Register,
    k_MatrixRegister,
    k_MatrixTileList,
    k_SVCR,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
    k_BTIHint,
  } Kind;

  // Source range of the operand, for diagnostics.
  SMLoc StartLoc, EndLoc;

  // A bare token (mnemonic suffix, keyword, ...). Data/Length view into the
  // source buffer; no ownership.
  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  // Separate shift/extend operand.
  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct RegOp {
    unsigned RegNum;
    RegKind Kind;
    int ElementWidth;

    // The register may be allowed as a different register class,
    // e.g. for GPR64as32 or GPR32as64.
    RegConstraintEqualityTy EqualityTy;

    // In some cases the shift/extend needs to be explicitly parsed together
    // with the register, rather than as a separate operand. This is needed
    // for addressing modes where the instruction as a whole dictates the
    // scaling/extend, rather than specific bits in the instruction.
    // By parsing them as a single operand, we avoid the need to pass an
    // extra operand in all CodeGen patterns (because all operands need to
    // have an associated value), and we avoid the need to update TableGen to
    // accept operands that have no associated bits in the instruction.
    //
    // An added benefit of parsing them together is that the assembler
    // can give a sensible diagnostic if the scaling is not correct.
    //
    // The default is 'lsl #0' (HasExplicitAmount = false) if no
    // ShiftExtend is specified.
    ShiftExtendOp ShiftExtend;
  };

  // A matrix register (array/tile/row/col), see MatrixKind.
  struct MatrixRegOp {
    unsigned RegNum;
    unsigned ElementWidth;
    MatrixKind Kind;
  };

  // Bitmask of tiles in a matrix tile list operand.
  struct MatrixTileListOp {
    unsigned RegMask = 0;
  };

  // A list of vector registers, e.g. "{ z0.d, z1.d }".
  struct VectorListOp {
    unsigned RegNum;      // First register in the list.
    unsigned Count;       // Number of registers in the list.
    unsigned Stride;      // Register number step between elements.
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind  RegisterKind;
  };

  // Lane index, e.g. the "[1]" in "v0.s[1]".
  struct VectorIndexOp {
    int Val;
  };

  // A plain immediate expression.
  struct ImmOp {
    const MCExpr *Val;
  };

  // An immediate with an explicit left-shift, e.g. "#1, lsl #12".
  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  // An inclusive immediate range, e.g. "0:3".
  struct ImmRangeOp {
    unsigned First;
    unsigned Last;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    uint64_t Val; // APFloat value bitcasted to uint64_t.
    bool IsExact; // describes whether parsed value was exact.
  };

  struct BarrierOp {
    const char *Data;
    unsigned Length;
    unsigned Val; // Not the enum since not all values have names.
    bool HasnXSModifier;
  };

  // A system register name with its MRS/MSR encodings and PState field.
  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;
    uint32_t MSRReg;
    uint32_t PStateField;
  };

  // Immediate naming a system control register, e.g. "c7".
  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct PSBHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct BTIHintOp {
    const char *Data;
    unsigned Length;
    unsigned Val;
  };

  struct SVCROp {
    const char *Data;
    unsigned Length;
    unsigned PStateField;
  };

  // Payload; the active member is determined by Kind. All members are
  // trivially copyable, so the copy constructor copies by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;
    struct SVCROp SVCR;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
514 
public:
  // Construct an operand of kind K; the matching union member is filled in
  // separately by the code that creates the operand.
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor. Because the payload is a union, only the member
  // selected by o.Kind is copied; every KindTy value must have a case here.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_ImmRange:
      ImmRange = o.ImmRange;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
      break;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_BTIHint:
      BTIHint = o.BTIHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    case k_SVCR:
      SVCR = o.SVCR;
      break;
    }
  }
582 
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Kind-checked accessors: each asserts the operand holds the expected
  // union member before reading it.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;
  }

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Reconstruct the APFloat from the stored 64-bit bit pattern.
  APFloat getFPImm() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
  }

  bool getFPImmIsExact() const {
    assert (Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.HasnXSModifier;
  }
652 
  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.RegNum;
  }

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;
  }

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;
  }

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;
  }

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;
  }

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return BTIHint.Val;
  }

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);
  }

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  // The shift/extend accessors below work for both a standalone
  // k_ShiftExtend operand and a k_Register operand that carries an
  // embedded shift/extend (see RegOp::ShiftExtend).

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;
    llvm_unreachable("Invalid access!");
  }

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;
    llvm_unreachable("Invalid access!");
  }

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
    llvm_unreachable("Invalid access!");
  }
771 
  bool isImm() const override { return Kind == k_Immediate; }
  // This parser never produces a generic memory operand kind.
  bool isMem() const override { return false; }

  // True for a constant immediate in the unsigned 6-bit range [0, 64).
  bool isUImm6() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= 0 && Val < 64);
  }
784 
  // A signed Width-bit immediate is a signed scaled immediate with scale 1.
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);
  }

  // Unsigned scaled immediate. When IsRange is set, the operand may be an
  // immediate range whose span (Last - First) must equal Offset.
  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
      return DiagnosticPredicateTy::NoMatch;

    return isImmScaled<Bits, Scale, IsRange>(false);
  }
799 
800   template <int Bits, int Scale, bool IsRange = false>
801   DiagnosticPredicate isImmScaled(bool Signed) const {
802     if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
803         (isImmRange() && !IsRange))
804       return DiagnosticPredicateTy::NoMatch;
805 
806     int64_t Val;
807     if (isImmRange())
808       Val = getFirstImmVal();
809     else {
810       const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
811       if (!MCE)
812         return DiagnosticPredicateTy::NoMatch;
813       Val = MCE->getValue();
814     }
815 
816     int64_t MinVal, MaxVal;
817     if (Signed) {
818       int64_t Shift = Bits - 1;
819       MinVal = (int64_t(1) << Shift) * -Scale;
820       MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
821     } else {
822       MinVal = 0;
823       MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
824     }
825 
826     if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
827       return DiagnosticPredicateTy::Match;
828 
829     return DiagnosticPredicateTy::NearMatch;
830   }
831 
832   DiagnosticPredicate isSVEPattern() const {
833     if (!isImm())
834       return DiagnosticPredicateTy::NoMatch;
835     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
836     if (!MCE)
837       return DiagnosticPredicateTy::NoMatch;
838     int64_t Val = MCE->getValue();
839     if (Val >= 0 && Val < 32)
840       return DiagnosticPredicateTy::Match;
841     return DiagnosticPredicateTy::NearMatch;
842   }
843 
844   DiagnosticPredicate isSVEVecLenSpecifier() const {
845     if (!isImm())
846       return DiagnosticPredicateTy::NoMatch;
847     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
848     if (!MCE)
849       return DiagnosticPredicateTy::NoMatch;
850     int64_t Val = MCE->getValue();
851     if (Val >= 0 && Val <= 1)
852       return DiagnosticPredicateTy::Match;
853     return DiagnosticPredicateTy::NearMatch;
854   }
855 
  // Returns true if Expr is a symbolic expression acceptable as a 12-bit
  // unsigned load/store offset, i.e. one of the low-12-bit (@pageoff-style)
  // relocation specifiers recognized by classifySymbolRef.
  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_SECREL_HI12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_PAGE_LO15) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return true;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }
891 
892   template <int Scale> bool isUImm12Offset() const {
893     if (!isImm())
894       return false;
895 
896     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
897     if (!MCE)
898       return isSymbolicUImm12Offset(getImm());
899 
900     int64_t Val = MCE->getValue();
901     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
902   }
903 
904   template <int N, int M>
905   bool isImmInRange() const {
906     if (!isImm())
907       return false;
908     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
909     if (!MCE)
910       return false;
911     int64_t Val = MCE->getValue();
912     return (Val >= N && Val <= M);
913   }
914 
  // NOTE: Also used for isLogicalImmNot as anything that can be represented as
  // a logical immediate can always be represented when inverted.
  template <typename T>
  bool isLogicalImm() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;

    int64_t Val = MCE->getValue();
    // Mask covering the bits above T's width. Built with two half-width
    // shifts to avoid left shift by 64 directly (undefined behavior when
    // T is 8 bytes wide).
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
    // Allow all-0 or all-1 in top bits to permit bitwise NOT.
    if ((Val & Upper) && (Val & Upper) != Upper)
      return false;

    // Check the low sizeof(T)*8 bits against the logical-immediate encoding.
    return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
  }
934 
  // Immediate parsed together with an explicit shift (stored in ShiftedImm).
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  // Operand parsed as an immediate range.
  bool isImmRange() const { return Kind == k_ImmRange; }
938 
  /// Returns the immediate value as a pair of (imm, shift) if the immediate is
  /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
  /// immediate that can be shifted by 'Shift'.
  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    // Case 1: parsed with an explicit shift that matches the requested width.
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    // Case 2: a plain constant. Report it pre-shifted when it is non-zero
    // and its low Width bits are all zero; otherwise report it unshifted.
    if (isImm())
      if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
        int64_t Val = CE->getValue();
        if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
          return std::make_pair(Val >> Width, Width);
        else
          return std::make_pair(Val, 0u);
      }

    // Not a constant immediate in either form.
    return {};
  }
959 
  /// Check whether this operand is valid as the immediate of an ADD/SUB
  /// (immediate) instruction: a 12-bit value optionally shifted by 'lsl #12',
  /// or a symbol reference with a modifier that resolves to such a value.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    // Symbolic operands are accepted only with the modifiers listed below
    // (the *_LO12/*_HI12 names select a 12-bit field at relocation time).
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
    }

    // If it's a constant, it should be a real immediate in range.
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

    // If it's an expression, we hope for the best and let the fixup/relocation
    // code deal with it.
    return true;
  }
1004 
1005   bool isAddSubImmNeg() const {
1006     if (!isShiftedImm() && !isImm())
1007       return false;
1008 
1009     // Otherwise it should be a real negative immediate in range.
1010     if (auto ShiftedVal = getShiftedVal<12>())
1011       return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1012 
1013     return false;
1014   }
1015 
  // Signed value in the range -128 to +127. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range -32768 to +32512.
  // For element-width of 8 bits a range of -128 to 255 is accepted,
  // since a copy of a byte can be either signed/unsigned.
  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    // Byte elements never take the 'lsl #8' shifted form; for wider elements
    // the constant is tested with the shift applied.
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
                                     << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1036 
  // Unsigned value in the range 0 to 255. For element widths of
  // 16 bits or higher it may also be a signed multiple of 256 in the
  // range 0 to 65280.
  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
      return DiagnosticPredicateTy::NoMatch;

    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    // Byte elements never take the 'lsl #8' shifted form; for wider elements
    // the constant is tested with the shift applied.
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
          AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
                                        << ShiftedImm->second))
        return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1054 
1055   template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1056     if (isLogicalImm<T>() && !isSVECpyImm<T>())
1057       return DiagnosticPredicateTy::Match;
1058     return DiagnosticPredicateTy::NoMatch;
1059   }
1060 
  bool isCondCode() const { return Kind == k_CondCode; }

  // Constant immediate accepted by the AdvSIMD modified-immediate "type 10"
  // encoding check.
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
1071 
  /// Check for a branch target encodable in an N-bit signed field scaled by
  /// 4. Non-constant expressions are accepted and left to the fixup code.
  template<int N>
  bool isBranchTarget() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    // Branch offsets are word-aligned; the low two bits must be clear.
    if (Val & 0x3)
      return false;
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    // After scaling down by 4, the offset must fit in N signed bits.
    return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
  }
1085 
1086   bool
1087   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
1088     if (!isImm())
1089       return false;
1090 
1091     AArch64MCExpr::VariantKind ELFRefKind;
1092     MCSymbolRefExpr::VariantKind DarwinRefKind;
1093     int64_t Addend;
1094     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1095                                              DarwinRefKind, Addend)) {
1096       return false;
1097     }
1098     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
1099       return false;
1100 
1101     return llvm::is_contained(AllowedModifiers, ELFRefKind);
1102   }
1103 
  // Symbol modifiers valid for the 'g3' 16-bit-chunk MOVZ/MOVK forms.
  bool isMovWSymbolG3() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G3, AArch64MCExpr::VK_PREL_G3});
  }

  // Symbol modifiers valid for the 'g2' forms (absolute, PC-relative and
  // TLS variants, with and without overflow checking).
  bool isMovWSymbolG2() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
         AArch64MCExpr::VK_ABS_G2_NC, AArch64MCExpr::VK_PREL_G2,
         AArch64MCExpr::VK_PREL_G2_NC, AArch64MCExpr::VK_TPREL_G2,
         AArch64MCExpr::VK_DTPREL_G2});
  }

  // Symbol modifiers valid for the 'g1' forms.
  bool isMovWSymbolG1() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
         AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_PREL_G1,
         AArch64MCExpr::VK_PREL_G1_NC, AArch64MCExpr::VK_GOTTPREL_G1,
         AArch64MCExpr::VK_TPREL_G1, AArch64MCExpr::VK_TPREL_G1_NC,
         AArch64MCExpr::VK_DTPREL_G1, AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  // Symbol modifiers valid for the 'g0' forms.
  bool isMovWSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
         AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_PREL_G0,
         AArch64MCExpr::VK_PREL_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_TPREL_G0_NC,
         AArch64MCExpr::VK_DTPREL_G0, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
1133 
1134   template<int RegWidth, int Shift>
1135   bool isMOVZMovAlias() const {
1136     if (!isImm()) return false;
1137 
1138     const MCExpr *E = getImm();
1139     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1140       uint64_t Value = CE->getValue();
1141 
1142       return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1143     }
1144     // Only supports the case of Shift being 0 if an expression is used as an
1145     // operand
1146     return !Shift && E;
1147   }
1148 
1149   template<int RegWidth, int Shift>
1150   bool isMOVNMovAlias() const {
1151     if (!isImm()) return false;
1152 
1153     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1154     if (!CE) return false;
1155     uint64_t Value = CE->getValue();
1156 
1157     return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1158   }
1159 
  // FP immediate encodable in the AArch64 8-bit FP immediate field
  // (getFP64Imm returns -1 for unencodable values).
  bool isFPImm() const {
    return Kind == k_FPImm &&
           AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
  }

  // Barrier operand without the nXS modifier.
  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  }
  // Barrier operand carrying the nXS modifier.
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  }
  bool isSysReg() const { return Kind == k_SysReg; }
1172 
  // Readable (MRS) system register; -1U marks a failed encoding lookup.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }

  // Writable (MSR) system register; -1U marks a failed encoding lookup.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }

  // PSTATE field that takes an immediate in [0, 1] (table lookup by
  // encoding).
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
  }

  // PSTATE field that takes an immediate in [0, 15] (table lookup by
  // encoding).
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
  }

  // Valid SVCR operand; -1U marks an invalid/unrecognized PState field.
  bool isSVCR() const {
    if (Kind != k_SVCR)
      return false;
    return SVCR.PStateField != -1U;
  }
1200 
  bool isReg() const override {
    return Kind == k_Register;
  }

  bool isVectorList() const { return Kind == k_VectorList; }

  // Scalar (non-vector) register operand.
  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;
  }

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
  }

  // NEON vector register restricted to the FPR128_lo / FPR64_lo register
  // classes (the "low" subsets of the FP register file).
  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
                Reg.RegNum) ||
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
                Reg.RegNum));
  }

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1225 
1226   template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1227     RegKind RK;
1228     switch (Class) {
1229     case AArch64::PPRRegClassID:
1230     case AArch64::PPR_3bRegClassID:
1231     case AArch64::PPR_p8to15RegClassID:
1232       RK = RegKind::SVEPredicateAsCounter;
1233       break;
1234     default:
1235       llvm_unreachable("Unsupport register class");
1236     }
1237 
1238     return (Kind == k_Register && Reg.Kind == RK) &&
1239            AArch64MCRegisterClasses[Class].contains(getReg());
1240   }
1241 
1242   template <unsigned Class> bool isSVEVectorReg() const {
1243     RegKind RK;
1244     switch (Class) {
1245     case AArch64::ZPRRegClassID:
1246     case AArch64::ZPR_3bRegClassID:
1247     case AArch64::ZPR_4bRegClassID:
1248       RK = RegKind::SVEDataVector;
1249       break;
1250     case AArch64::PPRRegClassID:
1251     case AArch64::PPR_3bRegClassID:
1252       RK = RegKind::SVEPredicateVector;
1253       break;
1254     default:
1255       llvm_unreachable("Unsupport register class");
1256     }
1257 
1258     return (Kind == k_Register && Reg.Kind == RK) &&
1259            AArch64MCRegisterClasses[Class].contains(getReg());
1260   }
1261 
  // Scalar FP register used where an SVE Z register is expected (see
  // addFPRasZPRRegOperands for the conversion).
  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());
  }
1266 
1267   template <int ElementWidth, unsigned Class>
1268   DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1269     if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1270       return DiagnosticPredicateTy::NoMatch;
1271 
1272     if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1273       return DiagnosticPredicateTy::Match;
1274 
1275     return DiagnosticPredicateTy::NearMatch;
1276   }
1277 
1278   template <int ElementWidth, unsigned Class>
1279   DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1280     if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1281       return DiagnosticPredicateTy::NoMatch;
1282 
1283     if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1284       return DiagnosticPredicateTy::Match;
1285 
1286     return DiagnosticPredicateTy::NearMatch;
1287   }
1288 
1289   template <int ElementWidth, unsigned Class>
1290   DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1291     if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1292       return DiagnosticPredicateTy::NoMatch;
1293 
1294     if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1295       return DiagnosticPredicateTy::Match;
1296 
1297     return DiagnosticPredicateTy::NearMatch;
1298   }
1299 
  /// Match an SVE data vector register together with its shift/extend,
  /// checking both the extend type and the shift amount expected for the
  /// access width.
  template <int ElementWidth, unsigned Class,
            AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
      return DiagnosticPredicateTy::NoMatch;

    // Give a more specific diagnostic when the user has explicitly typed in
    // a shift-amount that does not match what is expected, but for which
    // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
    if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
                        ShiftExtendTy == AArch64_AM::SXTW) &&
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
      return DiagnosticPredicateTy::NoMatch;

    if (MatchShift && ShiftExtendTy == getShiftExtendType())
      return DiagnosticPredicateTy::Match;

    return DiagnosticPredicateTy::NearMatch;
  }
1322 
  // Scalar register in the GPR64 class; emitted as its same-index W register
  // (see addGPR32as64Operands).
  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // Scalar register in the GPR32 class; emitted as its same-index X register
  // (see addGPR64as32Operands).
  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
  }

  // Member of the GPR64x8 (eight-consecutive-X-register) class.
  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
               Reg.RegNum);
  }

  // Member of the W-register sequential-pair class.
  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Member of the X-register sequential-pair class.
  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }

  // Exactly the XZR register (used by SYSP's XZR-pair form).
  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
  }
1354 
1355   template<int64_t Angle, int64_t Remainder>
1356   DiagnosticPredicate isComplexRotation() const {
1357     if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1358 
1359     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1360     if (!CE) return DiagnosticPredicateTy::NoMatch;
1361     uint64_t Value = CE->getValue();
1362 
1363     if (Value % Angle == Remainder && Value <= 270)
1364       return DiagnosticPredicateTy::Match;
1365     return DiagnosticPredicateTy::NearMatch;
1366   }
1367 
  // Scalar register contained in the given 64-bit GPR register class.
  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());
  }

  // GPR64 carrying an 'lsl #log2(ExtWidth/8)' shift-extend.
  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
      return DiagnosticPredicateTy::NoMatch;

    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NearMatch;
  }
1383 
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  /// NumElements == 0 distinguishes the implicitly-typed form from lists
  /// parsed with an explicit element type.
  template <RegKind VectorKind, unsigned NumRegs>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind;
  }
1392 
1393   template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1394             unsigned ElementWidth, unsigned Stride = 1>
1395   bool isTypedVectorList() const {
1396     if (Kind != k_VectorList)
1397       return false;
1398     if (VectorList.Count != NumRegs)
1399       return false;
1400     if (VectorList.RegisterKind != VectorKind)
1401       return false;
1402     if (VectorList.ElementWidth != ElementWidth)
1403       return false;
1404     if (VectorList.Stride != Stride)
1405       return false;
1406     return VectorList.NumElements == NumElements;
1407   }
1408 
  /// Match a vector list whose first register is aligned to a multiple of
  /// the list length (counting from Z0).
  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListMultiple() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    // The starting register must be a multiple of NumRegs from Z0.
    if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
      return DiagnosticPredicateTy::NearMatch;
    return DiagnosticPredicateTy::Match;
  }
1420 
  /// Match a strided vector list (implicit element count). The list must
  /// begin in the first \p Stride registers of either half of the Z file
  /// (Z0.. or Z16..).
  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
                                 ElementWidth, Stride>();
    if (!Res)
      return DiagnosticPredicateTy::NoMatch;
    if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
        ((VectorList.RegNum >= AArch64::Z16) &&
         (VectorList.RegNum < (AArch64::Z16 + Stride))))
      return DiagnosticPredicateTy::Match;
    return DiagnosticPredicateTy::NoMatch;
  }
1434 
1435   template <int Min, int Max>
1436   DiagnosticPredicate isVectorIndex() const {
1437     if (Kind != k_VectorIndex)
1438       return DiagnosticPredicateTy::NoMatch;
1439     if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1440       return DiagnosticPredicateTy::Match;
1441     return DiagnosticPredicateTy::NearMatch;
1442   }
1443 
  bool isToken() const override { return Kind == k_Token; }

  // Token operand whose text is exactly Str.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  // Simple operand-kind predicates.
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1454   bool isShifter() const {
1455     if (!isShiftExtend())
1456       return false;
1457 
1458     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1459     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1460             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1461             ST == AArch64_AM::MSL);
1462   }
1463 
  /// Check whether this FP immediate is bitwise-equal to the value of the
  /// ExactFPImm table entry identified by \p ImmEnum.
  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
      return DiagnosticPredicateTy::NoMatch;

    if (getFPImmIsExact()) {
      // Lookup the immediate from table of supported immediates.
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      assert(Desc && "Unknown enum value");

      // Calculate its FP value.
      APFloat RealVal(APFloat::IEEEdouble());
      auto StatusOrErr =
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
        llvm_unreachable("FP immediate is not exact");

      // Bitwise comparison so that e.g. +0.0 and -0.0 stay distinct.
      if (getFPImm().bitwiseIsEqual(RealVal))
        return DiagnosticPredicateTy::Match;
    }

    return DiagnosticPredicateTy::NearMatch;
  }
1486 
  // Accept either of two exact FP immediates; on failure, propagate the
  // diagnostic from the last single-immediate check.
  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
    if ((Res = isExactFPImm<ImmA>()))
      return DiagnosticPredicateTy::Match;
    if ((Res = isExactFPImm<ImmB>()))
      return DiagnosticPredicateTy::Match;
    return Res;
  }
1496 
1497   bool isExtend() const {
1498     if (!isShiftExtend())
1499       return false;
1500 
1501     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1502     return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1503             ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1504             ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1505             ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1506             ET == AArch64_AM::LSL) &&
1507            getShiftExtendAmount() <= 4;
1508   }
1509 
  /// Extend operand suitable for a 64-bit instruction taking a 32-bit source
  /// register (i.e. one of the byte/halfword/word extends).
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // Make sure the extend expects a 32-bit source register.
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
           ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
           ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
  }
1519 
1520   bool isExtendLSL64() const {
1521     if (!isExtend())
1522       return false;
1523     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1524     return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1525             ET == AArch64_AM::LSL) &&
1526            getShiftExtendAmount() <= 4;
1527   }
1528 
  // X-register memory-index extend: LSL or SXTX, with the shift amount
  // either matching the access size (log2 of the byte width) or zero.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1537 
  // W-register memory-index extend: UXTW or SXTW, with the shift amount
  // either matching the access size (log2 of the byte width) or zero.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1546 
1547   template <unsigned width>
1548   bool isArithmeticShifter() const {
1549     if (!isShifter())
1550       return false;
1551 
1552     // An arithmetic shifter is LSL, LSR, or ASR.
1553     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1554     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1555             ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1556   }
1557 
1558   template <unsigned width>
1559   bool isLogicalShifter() const {
1560     if (!isShifter())
1561       return false;
1562 
1563     // A logical shifter is LSL, LSR, ASR or ROR.
1564     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1565     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1566             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1567            getShiftExtendAmount() < width;
1568   }
1569 
1570   bool isMovImm32Shifter() const {
1571     if (!isShifter())
1572       return false;
1573 
1574     // A MOVi shifter is LSL of 0, 16, 32, or 48.
1575     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1576     if (ST != AArch64_AM::LSL)
1577       return false;
1578     uint64_t Val = getShiftExtendAmount();
1579     return (Val == 0 || Val == 16);
1580   }
1581 
1582   bool isMovImm64Shifter() const {
1583     if (!isShifter())
1584       return false;
1585 
1586     // A MOVi shifter is LSL of 0 or 16.
1587     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1588     if (ST != AArch64_AM::LSL)
1589       return false;
1590     uint64_t Val = getShiftExtendAmount();
1591     return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1592   }
1593 
1594   bool isLogicalVecShifter() const {
1595     if (!isShifter())
1596       return false;
1597 
1598     // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1599     unsigned Shift = getShiftExtendAmount();
1600     return getShiftExtendType() == AArch64_AM::LSL &&
1601            (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1602   }
1603 
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector halfword shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1613 
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1623 
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  // Width is the access size in bits of the scaled form.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();
  }
1633 
1634   bool isAdrpLabel() const {
1635     // Validation was handled during parsing, so we just verify that
1636     // something didn't go haywire.
1637     if (!isImm())
1638         return false;
1639 
1640     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1641       int64_t Val = CE->getValue();
1642       int64_t Min = - (4096 * (1LL << (21 - 1)));
1643       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1644       return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1645     }
1646 
1647     return true;
1648   }
1649 
1650   bool isAdrLabel() const {
1651     // Validation was handled during parsing, so we just verify that
1652     // something didn't go haywire.
1653     if (!isImm())
1654         return false;
1655 
1656     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1657       int64_t Val = CE->getValue();
1658       int64_t Min = - (1LL << (21 - 1));
1659       int64_t Max = ((1LL << (21 - 1)) - 1);
1660       return Val >= Min && Val <= Max;
1661     }
1662 
1663     return true;
1664   }
1665 
1666   template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1667   DiagnosticPredicate isMatrixRegOperand() const {
1668     if (!isMatrix())
1669       return DiagnosticPredicateTy::NoMatch;
1670     if (getMatrixKind() != Kind ||
1671         !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1672         EltSize != getMatrixElementWidth())
1673       return DiagnosticPredicateTy::NearMatch;
1674     return DiagnosticPredicateTy::Match;
1675   }
1676 
1677   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1678     // Add as immediates when possible.  Null MCExpr = 0.
1679     if (!Expr)
1680       Inst.addOperand(MCOperand::createImm(0));
1681     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1682       Inst.addOperand(MCOperand::createImm(CE->getValue()));
1683     else
1684       Inst.addOperand(MCOperand::createExpr(Expr));
1685   }
1686 
  // Append the register operand as-is.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Append the matrix register operand as-is.
  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMatrixReg()));
  }
1696 
  // The parsed operand is an X register; emit the W register with the same
  // encoding value instead.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1708 
  // The parsed operand is a W register; emit the X register with the same
  // encoding value instead.
  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }
1720 
1721   template <int Width>
1722   void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1723     unsigned Base;
1724     switch (Width) {
1725     case 8:   Base = AArch64::B0; break;
1726     case 16:  Base = AArch64::H0; break;
1727     case 32:  Base = AArch64::S0; break;
1728     case 64:  Base = AArch64::D0; break;
1729     case 128: Base = AArch64::Q0; break;
1730     default:
1731       llvm_unreachable("Unsupported width");
1732     }
1733     Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1734   }
1735 
  // The parsed operand is a Q register; emit the D register with the same
  // index.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  // Emit a full 128-bit Q register.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  // Emit a vector register from one of the "low" register classes as-is.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1754 
  // Row selector for the FirstRegs table in addVectorListOperands: the kind
  // of register a vector list is built from (NEON D/Q, SVE Z, predicate P).
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,
  };
1761 
  // Emit a vector register list (e.g. "{ v0.4s, v1.4s }") as the single
  // tuple register that represents it.
  template <VecListIndexType RegTy, unsigned NumRegs>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Indexed by [RegTy][NumRegs]. Entry 0 is the base register the parsed
    // list-start register is encoded against (NEON lists are parsed as Q
    // registers, hence Q0 in the DReg row); entries 1..4 are the first
    // tuple register for lists of that many registers.
    static const unsigned FirstRegs[][5] = {
      /* DReg */ { AArch64::Q0,
                   AArch64::D0,       AArch64::D0_D1,
                   AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
      /* QReg */ { AArch64::Q0,
                   AArch64::Q0,       AArch64::Q0_Q1,
                   AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
      /* ZReg */ { AArch64::Z0,
                   AArch64::Z0,       AArch64::Z0_Z1,
                   AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
      // PReg row only defines up to 2-register lists; remaining entries
      // are zero-initialized and guarded by the assert below.
      /* PReg */ { AArch64::P0,
                   AArch64::P0,       AArch64::P0_P1 }
    };

    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");

    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");

    // Offset the tuple base by the distance of the parsed start register
    // from the row's encoding base.
    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
    Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
                                         FirstRegs[(unsigned)RegTy][0]));
  }
1789 
  // Emit a strided SVE vector list as its tuple register. For a stride-2
  // list the start register must lie in Z0..Z7 or Z16..Z23; for stride-4 in
  // Z0..Z3 or Z16..Z19 (the tuple registers are named after those bases).
  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");

    switch (NumRegs) {
    case 2:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      }
      break;
    case 4:
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
      } else {
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
        Inst.addOperand(MCOperand::createReg(
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
      }
      break;
    default:
      llvm_unreachable("Unsupported number of registers for strided vec list");
    }
  }
1826 
1827   void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1828     assert(N == 1 && "Invalid number of operands!");
1829     unsigned RegMask = getMatrixTileListRegMask();
1830     assert(RegMask <= 0xFF && "Invalid mask!");
1831     Inst.addOperand(MCOperand::createImm(RegMask));
1832   }
1833 
1834   void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1835     assert(N == 1 && "Invalid number of operands!");
1836     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1837   }
1838 
  // Emit 0 or 1 depending on which of the two candidate exact FP immediates
  // (given as template arguments) this operand matches: 1 if it matches
  // ImmIs1, 0 otherwise.
  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
    Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
  }
1845 
1846   void addImmOperands(MCInst &Inst, unsigned N) const {
1847     assert(N == 1 && "Invalid number of operands!");
1848     // If this is a pageoff symrefexpr with an addend, adjust the addend
1849     // to be only the page-offset portion. Otherwise, just add the expr
1850     // as-is.
1851     addExpr(Inst, getImm());
1852   }
1853 
  // Emit an immediate plus its optional left shift as two operands
  // (value, shift amount).
  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
      // Constant representable once shifted by 'Shift'.
      Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
      Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
    } else if (isShiftedImm()) {
      // Symbolic value carrying an explicit shift.
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      // Plain (possibly symbolic) immediate: implicit shift of zero.
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }
1868 
1869   template <int Shift>
1870   void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1871     assert(N == 2 && "Invalid number of operands!");
1872     if (auto ShiftedVal = getShiftedVal<Shift>()) {
1873       Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1874       Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1875     } else
1876       llvm_unreachable("Not a shifted negative immediate");
1877   }
1878 
1879   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1880     assert(N == 1 && "Invalid number of operands!");
1881     Inst.addOperand(MCOperand::createImm(getCondCode()));
1882   }
1883 
1884   void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1885     assert(N == 1 && "Invalid number of operands!");
1886     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1887     if (!MCE)
1888       addExpr(Inst, getImm());
1889     else
1890       Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1891   }
1892 
  // ADR targets need no scaling; forward to the generic immediate path.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1896 
1897   template<int Scale>
1898   void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1899     assert(N == 1 && "Invalid number of operands!");
1900     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1901 
1902     if (!MCE) {
1903       Inst.addOperand(MCOperand::createExpr(getImm()));
1904       return;
1905     }
1906     Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1907   }
1908 
1909   void addUImm6Operands(MCInst &Inst, unsigned N) const {
1910     assert(N == 1 && "Invalid number of operands!");
1911     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1912     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1913   }
1914 
1915   template <int Scale>
1916   void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1917     assert(N == 1 && "Invalid number of operands!");
1918     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1919     Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1920   }
1921 
1922   template <int Scale>
1923   void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
1924     assert(N == 1 && "Invalid number of operands!");
1925     Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
1926   }
1927 
  // Encode a constant as an AArch64 logical-immediate for a register width
  // of sizeof(T)*8 bits and emit the encoded form.
  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // Truncate to the operand width before encoding.
    std::make_unsigned_t<T> Val = MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1936 
  // Like addLogicalImmOperands, but encodes the bitwise NOT of the constant
  // (used for the inverted-immediate instruction aliases).
  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    std::make_unsigned_t<T> Val = ~MCE->getValue();
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1945 
1946   void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1947     assert(N == 1 && "Invalid number of operands!");
1948     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1949     uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1950     Inst.addOperand(MCOperand::createImm(encoding));
1951   }
1952 
1953   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1954     // Branch operands don't encode the low bits, so shift them off
1955     // here. If it's a label, however, just put it on directly as there's
1956     // not enough information now to do anything.
1957     assert(N == 1 && "Invalid number of operands!");
1958     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1959     if (!MCE) {
1960       addExpr(Inst, getImm());
1961       return;
1962     }
1963     assert(MCE && "Invalid constant immediate operand!");
1964     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1965   }
1966 
1967   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1968     // Branch operands don't encode the low bits, so shift them off
1969     // here. If it's a label, however, just put it on directly as there's
1970     // not enough information now to do anything.
1971     assert(N == 1 && "Invalid number of operands!");
1972     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1973     if (!MCE) {
1974       addExpr(Inst, getImm());
1975       return;
1976     }
1977     assert(MCE && "Invalid constant immediate operand!");
1978     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1979   }
1980 
1981   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1982     // Branch operands don't encode the low bits, so shift them off
1983     // here. If it's a label, however, just put it on directly as there's
1984     // not enough information now to do anything.
1985     assert(N == 1 && "Invalid number of operands!");
1986     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1987     if (!MCE) {
1988       addExpr(Inst, getImm());
1989       return;
1990     }
1991     assert(MCE && "Invalid constant immediate operand!");
1992     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1993   }
1994 
1995   void addFPImmOperands(MCInst &Inst, unsigned N) const {
1996     assert(N == 1 && "Invalid number of operands!");
1997     Inst.addOperand(MCOperand::createImm(
1998         AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1999   }
2000 
2001   void addBarrierOperands(MCInst &Inst, unsigned N) const {
2002     assert(N == 1 && "Invalid number of operands!");
2003     Inst.addOperand(MCOperand::createImm(getBarrier()));
2004   }
2005 
2006   void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2007     assert(N == 1 && "Invalid number of operands!");
2008     Inst.addOperand(MCOperand::createImm(getBarrier()));
2009   }
2010 
2011   void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2012     assert(N == 1 && "Invalid number of operands!");
2013 
2014     Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2015   }
2016 
2017   void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2018     assert(N == 1 && "Invalid number of operands!");
2019 
2020     Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2021   }
2022 
2023   void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2024     assert(N == 1 && "Invalid number of operands!");
2025 
2026     Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2027   }
2028 
2029   void addSVCROperands(MCInst &Inst, unsigned N) const {
2030     assert(N == 1 && "Invalid number of operands!");
2031 
2032     Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2033   }
2034 
2035   void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2036     assert(N == 1 && "Invalid number of operands!");
2037 
2038     Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2039   }
2040 
2041   void addSysCROperands(MCInst &Inst, unsigned N) const {
2042     assert(N == 1 && "Invalid number of operands!");
2043     Inst.addOperand(MCOperand::createImm(getSysCR()));
2044   }
2045 
2046   void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2047     assert(N == 1 && "Invalid number of operands!");
2048     Inst.addOperand(MCOperand::createImm(getPrefetch()));
2049   }
2050 
2051   void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2052     assert(N == 1 && "Invalid number of operands!");
2053     Inst.addOperand(MCOperand::createImm(getPSBHint()));
2054   }
2055 
2056   void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2057     assert(N == 1 && "Invalid number of operands!");
2058     Inst.addOperand(MCOperand::createImm(getBTIHint()));
2059   }
2060 
2061   void addShifterOperands(MCInst &Inst, unsigned N) const {
2062     assert(N == 1 && "Invalid number of operands!");
2063     unsigned Imm =
2064         AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2065     Inst.addOperand(MCOperand::createImm(Imm));
2066   }
2067 
  // Emit the XZR register-pair operand for SYSP. Only XZR is ever legal
  // here; anything else trips the llvm_unreachable below.
  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // NOTE(review): silently adds no operand for non-scalar-register
    // operands -- presumably the matcher rejects those earlier; confirm.
    if (!isScalarReg())
      return;

    // Normalize to the 64-bit register with the same hardware encoding.
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID)
                       .getRegister(RI->getEncodingValue(getReg()));
    if (Reg != AArch64::XZR)
      llvm_unreachable("wrong register");

    Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  }
2082 
2083   void addExtendOperands(MCInst &Inst, unsigned N) const {
2084     assert(N == 1 && "Invalid number of operands!");
2085     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2086     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2087     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2088     Inst.addOperand(MCOperand::createImm(Imm));
2089   }
2090 
2091   void addExtend64Operands(MCInst &Inst, unsigned N) const {
2092     assert(N == 1 && "Invalid number of operands!");
2093     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2094     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2095     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2096     Inst.addOperand(MCOperand::createImm(Imm));
2097   }
2098 
2099   void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2100     assert(N == 2 && "Invalid number of operands!");
2101     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2102     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2103     Inst.addOperand(MCOperand::createImm(IsSigned));
2104     Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2105   }
2106 
2107   // For 8-bit load/store instructions with a register offset, both the
2108   // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2109   // they're disambiguated by whether the shift was explicit or implicit rather
2110   // than its size.
2111   void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2112     assert(N == 2 && "Invalid number of operands!");
2113     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2114     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2115     Inst.addOperand(MCOperand::createImm(IsSigned));
2116     Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2117   }
2118 
2119   template<int Shift>
2120   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2121     assert(N == 1 && "Invalid number of operands!");
2122 
2123     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2124     if (CE) {
2125       uint64_t Value = CE->getValue();
2126       Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2127     } else {
2128       addExpr(Inst, getImm());
2129     }
2130   }
2131 
2132   template<int Shift>
2133   void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2134     assert(N == 1 && "Invalid number of operands!");
2135 
2136     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2137     uint64_t Value = CE->getValue();
2138     Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2139   }
2140 
2141   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2142     assert(N == 1 && "Invalid number of operands!");
2143     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2144     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2145   }
2146 
2147   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2148     assert(N == 1 && "Invalid number of operands!");
2149     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2150     Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2151   }
2152 
2153   void print(raw_ostream &OS) const override;
2154 
2155   static std::unique_ptr<AArch64Operand>
2156   CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2157     auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2158     Op->Tok.Data = Str.data();
2159     Op->Tok.Length = Str.size();
2160     Op->Tok.IsSuffix = IsSuffix;
2161     Op->StartLoc = S;
2162     Op->EndLoc = S;
2163     return Op;
2164   }
2165 
2166   static std::unique_ptr<AArch64Operand>
2167   CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2168             RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2169             AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2170             unsigned ShiftAmount = 0,
2171             unsigned HasExplicitAmount = false) {
2172     auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2173     Op->Reg.RegNum = RegNum;
2174     Op->Reg.Kind = Kind;
2175     Op->Reg.ElementWidth = 0;
2176     Op->Reg.EqualityTy = EqTy;
2177     Op->Reg.ShiftExtend.Type = ExtTy;
2178     Op->Reg.ShiftExtend.Amount = ShiftAmount;
2179     Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2180     Op->StartLoc = S;
2181     Op->EndLoc = E;
2182     return Op;
2183   }
2184 
  // Build a vector register operand: a plain register operand plus the
  // element width. Only vector register kinds are accepted.
  static std::unique_ptr<AArch64Operand>
  CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
                  SMLoc S, SMLoc E, MCContext &Ctx,
                  AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
                  unsigned ShiftAmount = 0,
                  unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
                        HasExplicitAmount);
    Op->Reg.ElementWidth = ElementWidth;
    return Op;
  }
2200 
2201   static std::unique_ptr<AArch64Operand>
2202   CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2203                    unsigned NumElements, unsigned ElementWidth,
2204                    RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2205     auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2206     Op->VectorList.RegNum = RegNum;
2207     Op->VectorList.Count = Count;
2208     Op->VectorList.Stride = Stride;
2209     Op->VectorList.NumElements = NumElements;
2210     Op->VectorList.ElementWidth = ElementWidth;
2211     Op->VectorList.RegisterKind = RegisterKind;
2212     Op->StartLoc = S;
2213     Op->EndLoc = E;
2214     return Op;
2215   }
2216 
2217   static std::unique_ptr<AArch64Operand>
2218   CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2219     auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2220     Op->VectorIndex.Val = Idx;
2221     Op->StartLoc = S;
2222     Op->EndLoc = E;
2223     return Op;
2224   }
2225 
2226   static std::unique_ptr<AArch64Operand>
2227   CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2228     auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2229     Op->MatrixTileList.RegMask = RegMask;
2230     Op->StartLoc = S;
2231     Op->EndLoc = E;
2232     return Op;
2233   }
2234 
  // Expand a ZA tile register of the given element width into the set of
  // 64-bit ZAD tiles it overlaps, inserting them into OutRegs.
  static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
                                  const unsigned ElementWidth) {
    // Keyed by (element width, tile register) -> overlapping ZAD tiles.
    // Width 0 and 8 (ZAB0) cover all eight ZAD tiles; 16- and 32-bit tiles
    // cover strided subsets.
    static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
        RegMap = {
            {{0, AArch64::ZAB0},
             {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
              AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
            {{8, AArch64::ZAB0},
             {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
              AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
            {{16, AArch64::ZAH0},
             {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
            {{16, AArch64::ZAH1},
             {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
            {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
            {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
            {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
            {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
        };

    if (ElementWidth == 64)
      OutRegs.insert(Reg); // 64-bit tiles are the ZAD tiles themselves.
    else {
      std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
      assert(!Regs.empty() && "Invalid tile or element width!");
      for (auto OutReg : Regs)
        OutRegs.insert(OutReg);
    }
  }
2264 
2265   static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2266                                                    SMLoc E, MCContext &Ctx) {
2267     auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2268     Op->Imm.Val = Val;
2269     Op->StartLoc = S;
2270     Op->EndLoc = E;
2271     return Op;
2272   }
2273 
2274   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2275                                                           unsigned ShiftAmount,
2276                                                           SMLoc S, SMLoc E,
2277                                                           MCContext &Ctx) {
2278     auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2279     Op->ShiftedImm .Val = Val;
2280     Op->ShiftedImm.ShiftAmount = ShiftAmount;
2281     Op->StartLoc = S;
2282     Op->EndLoc = E;
2283     return Op;
2284   }
2285 
2286   static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2287                                                         unsigned Last, SMLoc S,
2288                                                         SMLoc E,
2289                                                         MCContext &Ctx) {
2290     auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2291     Op->ImmRange.First = First;
2292     Op->ImmRange.Last = Last;
2293     Op->EndLoc = E;
2294     return Op;
2295   }
2296 
2297   static std::unique_ptr<AArch64Operand>
2298   CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2299     auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2300     Op->CondCode.Code = Code;
2301     Op->StartLoc = S;
2302     Op->EndLoc = E;
2303     return Op;
2304   }
2305 
2306   static std::unique_ptr<AArch64Operand>
2307   CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2308     auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2309     Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2310     Op->FPImm.IsExact = IsExact;
2311     Op->StartLoc = S;
2312     Op->EndLoc = S;
2313     return Op;
2314   }
2315 
2316   static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2317                                                        StringRef Str,
2318                                                        SMLoc S,
2319                                                        MCContext &Ctx,
2320                                                        bool HasnXSModifier) {
2321     auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2322     Op->Barrier.Val = Val;
2323     Op->Barrier.Data = Str.data();
2324     Op->Barrier.Length = Str.size();
2325     Op->Barrier.HasnXSModifier = HasnXSModifier;
2326     Op->StartLoc = S;
2327     Op->EndLoc = S;
2328     return Op;
2329   }
2330 
2331   static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2332                                                       uint32_t MRSReg,
2333                                                       uint32_t MSRReg,
2334                                                       uint32_t PStateField,
2335                                                       MCContext &Ctx) {
2336     auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2337     Op->SysReg.Data = Str.data();
2338     Op->SysReg.Length = Str.size();
2339     Op->SysReg.MRSReg = MRSReg;
2340     Op->SysReg.MSRReg = MSRReg;
2341     Op->SysReg.PStateField = PStateField;
2342     Op->StartLoc = S;
2343     Op->EndLoc = S;
2344     return Op;
2345   }
2346 
2347   static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2348                                                      SMLoc E, MCContext &Ctx) {
2349     auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2350     Op->SysCRImm.Val = Val;
2351     Op->StartLoc = S;
2352     Op->EndLoc = E;
2353     return Op;
2354   }
2355 
  // Build a prefetch-operation operand from its encoding and spelled name.
  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
                                                        StringRef Str,
                                                        SMLoc S,
                                                        MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
    Op->Prefetch.Val = Val;
    // NOTE(review): the name is stored through the Barrier union member
    // even though this is a k_Prefetch operand; this relies on the union
    // members sharing layout -- confirm the accessor reads it back through
    // the same member.
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2368 
2369   static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2370                                                        StringRef Str,
2371                                                        SMLoc S,
2372                                                        MCContext &Ctx) {
2373     auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2374     Op->PSBHint.Val = Val;
2375     Op->PSBHint.Data = Str.data();
2376     Op->PSBHint.Length = Str.size();
2377     Op->StartLoc = S;
2378     Op->EndLoc = S;
2379     return Op;
2380   }
2381 
  // Build a BTI hint operand from its encoding and spelled name.
  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    // Bit 5 is set on the stored value -- presumably biasing BTI into its
    // slot of the HINT immediate space; confirm against the HINT encoding.
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2394 
2395   static std::unique_ptr<AArch64Operand>
2396   CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2397                        SMLoc S, SMLoc E, MCContext &Ctx) {
2398     auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2399     Op->MatrixReg.RegNum = RegNum;
2400     Op->MatrixReg.ElementWidth = ElementWidth;
2401     Op->MatrixReg.Kind = Kind;
2402     Op->StartLoc = S;
2403     Op->EndLoc = E;
2404     return Op;
2405   }
2406 
2407   static std::unique_ptr<AArch64Operand>
2408   CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2409     auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2410     Op->SVCR.PStateField = PStateField;
2411     Op->SVCR.Data = Str.data();
2412     Op->SVCR.Length = Str.size();
2413     Op->StartLoc = S;
2414     Op->EndLoc = S;
2415     return Op;
2416   }
2417 
2418   static std::unique_ptr<AArch64Operand>
2419   CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2420                     bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2421     auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2422     Op->ShiftExtend.Type = ShOp;
2423     Op->ShiftExtend.Amount = Val;
2424     Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2425     Op->StartLoc = S;
2426     Op->EndLoc = E;
2427     return Op;
2428   }
2429 };
2430 
2431 } // end anonymous namespace.
2432 
// Debug dump of an operand, dispatched on its kind. Output format is for
// human consumption only and is not parsed anywhere.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
      OS << " (inexact)";
    OS << ">";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_ImmRange: {
    OS << "<immrange ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    // Print each register number in the (possibly strided) list.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i * getVectorListStride() << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint:
    OS << getPSBHintName();
    break;
  case k_BTIHint:
    OS << getBTIHintName();
    break;
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg() << ">";
    break;
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    // Print the mask as 8 binary digits, most-significant bit first.
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << '>';
    break;
  }
  case k_SVCR: {
    OS << getSVCR();
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
      break;
    // A register with a shift/extend falls through to print the suffix.
    [[fallthrough]];
  case k_ShiftExtend:
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
}
2532 
2533 /// @name Auto-generated Match Functions
2534 /// {
2535 
2536 static unsigned MatchRegisterName(StringRef Name);
2537 
2538 /// }
2539 
// Maps a NEON vector register name ("v0".."v31", case-insensitive) to the
// corresponding Qn register number, or returns 0 if the name is not a NEON
// vector register.
static unsigned MatchNeonVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
      .Default(0);
}
2576 
2577 /// Returns an optional pair of (#elements, element-width) if Suffix
2578 /// is a valid vector kind. Where the number of elements in a vector
2579 /// or the vector width is implicit or explicitly unknown (but still a
2580 /// valid suffix kind), 0 is used.
2581 static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2582                                                           RegKind VectorKind) {
2583   std::pair<int, int> Res = {-1, -1};
2584 
2585   switch (VectorKind) {
2586   case RegKind::NeonVector:
2587     Res =
2588         StringSwitch<std::pair<int, int>>(Suffix.lower())
2589             .Case("", {0, 0})
2590             .Case(".1d", {1, 64})
2591             .Case(".1q", {1, 128})
2592             // '.2h' needed for fp16 scalar pairwise reductions
2593             .Case(".2h", {2, 16})
2594             .Case(".2s", {2, 32})
2595             .Case(".2d", {2, 64})
2596             // '.4b' is another special case for the ARMv8.2a dot product
2597             // operand
2598             .Case(".4b", {4, 8})
2599             .Case(".4h", {4, 16})
2600             .Case(".4s", {4, 32})
2601             .Case(".8b", {8, 8})
2602             .Case(".8h", {8, 16})
2603             .Case(".16b", {16, 8})
2604             // Accept the width neutral ones, too, for verbose syntax. If those
2605             // aren't used in the right places, the token operand won't match so
2606             // all will work out.
2607             .Case(".b", {0, 8})
2608             .Case(".h", {0, 16})
2609             .Case(".s", {0, 32})
2610             .Case(".d", {0, 64})
2611             .Default({-1, -1});
2612     break;
2613   case RegKind::SVEPredicateAsCounter:
2614   case RegKind::SVEPredicateVector:
2615   case RegKind::SVEDataVector:
2616   case RegKind::Matrix:
2617     Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2618               .Case("", {0, 0})
2619               .Case(".b", {0, 8})
2620               .Case(".h", {0, 16})
2621               .Case(".s", {0, 32})
2622               .Case(".d", {0, 64})
2623               .Case(".q", {0, 128})
2624               .Default({-1, -1});
2625     break;
2626   default:
2627     llvm_unreachable("Unsupported RegKind");
2628   }
2629 
2630   if (Res == std::make_pair(-1, -1))
2631     return std::nullopt;
2632 
2633   return std::optional<std::pair<int, int>>(Res);
2634 }
2635 
2636 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2637   return parseVectorKind(Suffix, VectorKind).has_value();
2638 }
2639 
// Maps an SVE data vector register name ("z0".."z31", case-insensitive) to
// the corresponding Zn register number, or returns 0 if the name is not an
// SVE data vector register.
static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Default(0);
}
2676 
// Maps an SVE predicate register name ("p0".."p15", case-insensitive) to the
// corresponding Pn register number, or returns 0 if the name is not an SVE
// predicate register.
static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Default(0);
}
2697 
// Maps a predicate-as-counter register name ("pn0".."pn15", case-insensitive)
// to the underlying Pn register number (the "pn" spelling aliases the same
// registers), or returns 0 if the name does not match.
static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("pn0", AArch64::P0)
      .Case("pn1", AArch64::P1)
      .Case("pn2", AArch64::P2)
      .Case("pn3", AArch64::P3)
      .Case("pn4", AArch64::P4)
      .Case("pn5", AArch64::P5)
      .Case("pn6", AArch64::P6)
      .Case("pn7", AArch64::P7)
      .Case("pn8", AArch64::P8)
      .Case("pn9", AArch64::P9)
      .Case("pn10", AArch64::P10)
      .Case("pn11", AArch64::P11)
      .Case("pn12", AArch64::P12)
      .Case("pn13", AArch64::P13)
      .Case("pn14", AArch64::P14)
      .Case("pn15", AArch64::P15)
      .Default(0);
}
2718 
// Maps an SME ZA tile name with an element-size suffix ("za0.d", "za1.h",
// ..., case-insensitive) to the corresponding ZA tile register number for
// use in a matrix tile list, or returns 0 if the name does not match.
static unsigned matchMatrixTileListRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Default(0);
}
2738 
// Maps an SME matrix register name to its register number, or returns 0 if
// the name does not match. Accepts the whole-array name "za", plain tile
// names ("zaN.<size>"), and the horizontal/vertical slice spellings
// ("zaNh.<size>" / "zaNv.<size>"), which alias the same tile registers.
static unsigned matchMatrixRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
      .Default(0);
}
2837 
2838 bool AArch64AsmParser::parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
2839                                      SMLoc &EndLoc) {
2840   return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2841 }
2842 
2843 OperandMatchResultTy AArch64AsmParser::tryParseRegister(MCRegister &RegNo,
2844                                                         SMLoc &StartLoc,
2845                                                         SMLoc &EndLoc) {
2846   StartLoc = getLoc();
2847   auto Res = tryParseScalarRegister(RegNo);
2848   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2849   return Res;
2850 }
2851 
2852 // Matches a register name or register alias previously defined by '.req'
2853 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2854                                                   RegKind Kind) {
2855   unsigned RegNum = 0;
2856   if ((RegNum = matchSVEDataVectorRegName(Name)))
2857     return Kind == RegKind::SVEDataVector ? RegNum : 0;
2858 
2859   if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2860     return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2861 
2862   if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
2863     return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2864 
2865   if ((RegNum = MatchNeonVectorRegName(Name)))
2866     return Kind == RegKind::NeonVector ? RegNum : 0;
2867 
2868   if ((RegNum = matchMatrixRegName(Name)))
2869     return Kind == RegKind::Matrix ? RegNum : 0;
2870 
2871  if (Name.equals_insensitive("zt0"))
2872     return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2873 
2874   // The parsed register must be of RegKind Scalar
2875   if ((RegNum = MatchRegisterName(Name)))
2876     return (Kind == RegKind::Scalar) ? RegNum : 0;
2877 
2878   if (!RegNum) {
2879     // Handle a few common aliases of registers.
2880     if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2881                     .Case("fp", AArch64::FP)
2882                     .Case("lr",  AArch64::LR)
2883                     .Case("x31", AArch64::XZR)
2884                     .Case("w31", AArch64::WZR)
2885                     .Default(0))
2886       return Kind == RegKind::Scalar ? RegNum : 0;
2887 
2888     // Check for aliases registered via .req. Canonicalize to lower case.
2889     // That's more consistent since register names are case insensitive, and
2890     // it's how the original entry was passed in from MC/MCParser/AsmParser.
2891     auto Entry = RegisterReqs.find(Name.lower());
2892     if (Entry == RegisterReqs.end())
2893       return 0;
2894 
2895     // set RegNum if the match is the right kind of register
2896     if (Kind == Entry->getValue().first)
2897       RegNum = Entry->getValue().second;
2898   }
2899   return RegNum;
2900 }
2901 
2902 unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2903   switch (K) {
2904   case RegKind::Scalar:
2905   case RegKind::NeonVector:
2906   case RegKind::SVEDataVector:
2907     return 32;
2908   case RegKind::Matrix:
2909   case RegKind::SVEPredicateVector:
2910   case RegKind::SVEPredicateAsCounter:
2911     return 16;
2912   case RegKind::LookupTable:
2913     return 1;
2914   }
2915   llvm_unreachable("Unsupported RegKind");
2916 }
2917 
2918 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2919 /// Identifier when called, and if it is a register name the token is eaten and
2920 /// the register is added to the operand list.
2921 ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
2922   const AsmToken &Tok = getTok();
2923   if (Tok.isNot(AsmToken::Identifier))
2924     return ParseStatus::NoMatch;
2925 
2926   std::string lowerCase = Tok.getString().lower();
2927   unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2928   if (Reg == 0)
2929     return ParseStatus::NoMatch;
2930 
2931   RegNum = Reg;
2932   Lex(); // Eat identifier token.
2933   return ParseStatus::Success;
2934 }
2935 
2936 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2937 ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2938   SMLoc S = getLoc();
2939 
2940   if (getTok().isNot(AsmToken::Identifier))
2941     return Error(S, "Expected cN operand where 0 <= N <= 15");
2942 
2943   StringRef Tok = getTok().getIdentifier();
2944   if (Tok[0] != 'c' && Tok[0] != 'C')
2945     return Error(S, "Expected cN operand where 0 <= N <= 15");
2946 
2947   uint32_t CRNum;
2948   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2949   if (BadNum || CRNum > 15)
2950     return Error(S, "Expected cN operand where 0 <= N <= 15");
2951 
2952   Lex(); // Eat identifier token.
2953   Operands.push_back(
2954       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2955   return ParseStatus::Success;
2956 }
2957 
2958 // Either an identifier for named values or a 6-bit immediate.
2959 ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
2960   SMLoc S = getLoc();
2961   const AsmToken &Tok = getTok();
2962 
2963   unsigned MaxVal = 63;
2964 
2965   // Immediate case, with optional leading hash:
2966   if (parseOptionalToken(AsmToken::Hash) ||
2967       Tok.is(AsmToken::Integer)) {
2968     const MCExpr *ImmVal;
2969     if (getParser().parseExpression(ImmVal))
2970       return ParseStatus::Failure;
2971 
2972     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2973     if (!MCE)
2974       return TokError("immediate value expected for prefetch operand");
2975     unsigned prfop = MCE->getValue();
2976     if (prfop > MaxVal)
2977       return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2978                       "] expected");
2979 
2980     auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
2981     Operands.push_back(AArch64Operand::CreatePrefetch(
2982         prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
2983     return ParseStatus::Success;
2984   }
2985 
2986   if (Tok.isNot(AsmToken::Identifier))
2987     return TokError("prefetch hint expected");
2988 
2989   auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
2990   if (!RPRFM)
2991     return TokError("prefetch hint expected");
2992 
2993   Operands.push_back(AArch64Operand::CreatePrefetch(
2994       RPRFM->Encoding, Tok.getString(), S, getContext()));
2995   Lex(); // Eat identifier token.
2996   return ParseStatus::Success;
2997 }
2998 
2999 /// tryParsePrefetch - Try to parse a prefetch operand.
3000 template <bool IsSVEPrefetch>
3001 ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3002   SMLoc S = getLoc();
3003   const AsmToken &Tok = getTok();
3004 
3005   auto LookupByName = [](StringRef N) {
3006     if (IsSVEPrefetch) {
3007       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3008         return std::optional<unsigned>(Res->Encoding);
3009     } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3010       return std::optional<unsigned>(Res->Encoding);
3011     return std::optional<unsigned>();
3012   };
3013 
3014   auto LookupByEncoding = [](unsigned E) {
3015     if (IsSVEPrefetch) {
3016       if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3017         return std::optional<StringRef>(Res->Name);
3018     } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3019       return std::optional<StringRef>(Res->Name);
3020     return std::optional<StringRef>();
3021   };
3022   unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3023 
3024   // Either an identifier for named values or a 5-bit immediate.
3025   // Eat optional hash.
3026   if (parseOptionalToken(AsmToken::Hash) ||
3027       Tok.is(AsmToken::Integer)) {
3028     const MCExpr *ImmVal;
3029     if (getParser().parseExpression(ImmVal))
3030       return ParseStatus::Failure;
3031 
3032     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3033     if (!MCE)
3034       return TokError("immediate value expected for prefetch operand");
3035     unsigned prfop = MCE->getValue();
3036     if (prfop > MaxVal)
3037       return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3038                       "] expected");
3039 
3040     auto PRFM = LookupByEncoding(MCE->getValue());
3041     Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3042                                                       S, getContext()));
3043     return ParseStatus::Success;
3044   }
3045 
3046   if (Tok.isNot(AsmToken::Identifier))
3047     return TokError("prefetch hint expected");
3048 
3049   auto PRFM = LookupByName(Tok.getString());
3050   if (!PRFM)
3051     return TokError("prefetch hint expected");
3052 
3053   Operands.push_back(AArch64Operand::CreatePrefetch(
3054       *PRFM, Tok.getString(), S, getContext()));
3055   Lex(); // Eat identifier token.
3056   return ParseStatus::Success;
3057 }
3058 
3059 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3060 ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3061   SMLoc S = getLoc();
3062   const AsmToken &Tok = getTok();
3063   if (Tok.isNot(AsmToken::Identifier))
3064     return TokError("invalid operand for instruction");
3065 
3066   auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3067   if (!PSB)
3068     return TokError("invalid operand for instruction");
3069 
3070   Operands.push_back(AArch64Operand::CreatePSBHint(
3071       PSB->Encoding, Tok.getString(), S, getContext()));
3072   Lex(); // Eat identifier token.
3073   return ParseStatus::Success;
3074 }
3075 
3076 ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3077   SMLoc StartLoc = getLoc();
3078 
3079   MCRegister RegNum;
3080 
3081   // The case where xzr, xzr is not present is handled by an InstAlias.
3082 
3083   auto RegTok = getTok(); // in case we need to backtrack
3084   if (!tryParseScalarRegister(RegNum).isSuccess())
3085     return ParseStatus::NoMatch;
3086 
3087   if (RegNum != AArch64::XZR) {
3088     getLexer().UnLex(RegTok);
3089     return ParseStatus::NoMatch;
3090   }
3091 
3092   if (parseComma())
3093     return ParseStatus::Failure;
3094 
3095   if (!tryParseScalarRegister(RegNum).isSuccess())
3096     return TokError("expected register operand");
3097 
3098   if (RegNum != AArch64::XZR)
3099     return TokError("xzr must be followed by xzr");
3100 
3101   // We need to push something, since we claim this is an operand in .td.
3102   // See also AArch64AsmParser::parseKeywordOperand.
3103   Operands.push_back(AArch64Operand::CreateReg(
3104       RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3105 
3106   return ParseStatus::Success;
3107 }
3108 
3109 /// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3110 ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3111   SMLoc S = getLoc();
3112   const AsmToken &Tok = getTok();
3113   if (Tok.isNot(AsmToken::Identifier))
3114     return TokError("invalid operand for instruction");
3115 
3116   auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3117   if (!BTI)
3118     return TokError("invalid operand for instruction");
3119 
3120   Operands.push_back(AArch64Operand::CreateBTIHint(
3121       BTI->Encoding, Tok.getString(), S, getContext()));
3122   Lex(); // Eat identifier token.
3123   return ParseStatus::Success;
3124 }
3125 
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction.
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr = nullptr;

  // An optional '#' may precede the expression.
  if (getTok().is(AsmToken::Hash)) {
    Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return ParseStatus::Failure;

  // Classify the reference so we can check that any modifier is one ADRP
  // accepts (page-granular references only).
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin @gotpage/@tlvppage references cannot carry an addend.
      return Error(S, "gotpage label reference not allowed an addend");
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_ABS_PAGE_NC &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE_LO15 &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return Error(S, "page or gotpage label reference expected");
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return ParseStatus::Success;
}
3174 
3175 /// tryParseAdrLabel - Parse and validate a source label for the ADR
3176 /// instruction.
3177 ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3178   SMLoc S = getLoc();
3179   const MCExpr *Expr = nullptr;
3180 
3181   // Leave anything with a bracket to the default for SVE
3182   if (getTok().is(AsmToken::LBrac))
3183     return ParseStatus::NoMatch;
3184 
3185   if (getTok().is(AsmToken::Hash))
3186     Lex(); // Eat hash token.
3187 
3188   if (parseSymbolicImmVal(Expr))
3189     return ParseStatus::Failure;
3190 
3191   AArch64MCExpr::VariantKind ELFRefKind;
3192   MCSymbolRefExpr::VariantKind DarwinRefKind;
3193   int64_t Addend;
3194   if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3195     if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
3196         ELFRefKind == AArch64MCExpr::VK_INVALID) {
3197       // No modifier was specified at all; this is the syntax for an ELF basic
3198       // ADR relocation (unfortunately).
3199       Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, getContext());
3200     } else {
3201       return Error(S, "unexpected adr label");
3202     }
3203   }
3204 
3205   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3206   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3207   return ParseStatus::Success;
3208 }
3209 
/// tryParseFPImm - A floating point immediate expression operand.
template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = parseOptionalToken(AsmToken::Hash);

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = parseOptionalToken(AsmToken::Minus);

  const AsmToken &Tok = getTok();
  if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
    // Without a leading '#' this may simply be a different kind of operand;
    // with one, it has to be an FP immediate.
    if (!Hash)
      return ParseStatus::NoMatch;
    return TokError("invalid floating point immediate");
  }

  // Parse hexadecimal representation.
  if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
    // The hex form is the raw 8-bit encoding: it must fit in a byte and
    // cannot be combined with a leading minus.
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError("encoded floating point value out of range");

    APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
    Operands.push_back(
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
  } else {
    // Parse FP representation.
    APFloat RealVal(APFloat::IEEEdouble());
    auto StatusOrErr =
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
    // takeError() consumes the error state; a failure means the token was
    // not a valid FP literal.
    if (errorToBool(StatusOrErr.takeError()))
      return TokError("invalid floating point representation");

    if (isNegative)
      RealVal.changeSign();

    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
      // Represent +0.0 as the literal tokens "#0" ".0" so it matches the
      // instruction forms that spell the operand that way.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
    } else
      Operands.push_back(AArch64Operand::CreateFPImm(
          RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
  }

  Lex(); // Eat the token.

  return ParseStatus::Success;
}
3258 
/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
/// a shift suffix, for example '#1, lsl #12'.
ParseStatus
AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().is(AsmToken::Hash))
    Lex(); // Eat '#'
  else if (getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return ParseStatus::NoMatch;

  // "<int>:<int>" is an immediate range; hand that form off to the
  // dedicated parser.
  if (getTok().is(AsmToken::Integer) &&
      getLexer().peekTok().is(AsmToken::Colon))
    return tryParseImmRange(Operands);

  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
    return ParseStatus::Failure;
  else if (getTok().isNot(AsmToken::Comma)) {
    // No suffix follows; emit the plain immediate.
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Eat ','
  Lex();
  // A vector-group operand may follow instead of a shift (see
  // parseOptionalVGOperand); on success it returns false and yields the
  // matched token text in VecGroup.
  StringRef VecGroup;
  if (!parseOptionalVGOperand(Operands, VecGroup)) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    Operands.push_back(
        AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!getTok().is(AsmToken::Identifier) ||
      !getTok().getIdentifier().equals_insensitive("lsl"))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  // Eat 'lsl'
  Lex();

  parseOptionalToken(AsmToken::Hash);

  if (getTok().isNot(AsmToken::Integer))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");

  int64_t ShiftAmount = getTok().getIntVal();

  if (ShiftAmount < 0)
    return Error(getLoc(), "positive shift amount required");
  Lex(); // Eat the number

  // Just in case the optional lsl #0 is used for immediates other than zero.
  if (ShiftAmount == 0 && Imm != nullptr) {
    Operands.push_back(
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
                                                      getLoc(), getContext()));
  return ParseStatus::Success;
}
3325 
3326 /// parseCondCodeString - Parse a Condition Code string, optionally returning a
3327 /// suggestion to help common typos.
3328 AArch64CC::CondCode
3329 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3330   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3331                     .Case("eq", AArch64CC::EQ)
3332                     .Case("ne", AArch64CC::NE)
3333                     .Case("cs", AArch64CC::HS)
3334                     .Case("hs", AArch64CC::HS)
3335                     .Case("cc", AArch64CC::LO)
3336                     .Case("lo", AArch64CC::LO)
3337                     .Case("mi", AArch64CC::MI)
3338                     .Case("pl", AArch64CC::PL)
3339                     .Case("vs", AArch64CC::VS)
3340                     .Case("vc", AArch64CC::VC)
3341                     .Case("hi", AArch64CC::HI)
3342                     .Case("ls", AArch64CC::LS)
3343                     .Case("ge", AArch64CC::GE)
3344                     .Case("lt", AArch64CC::LT)
3345                     .Case("gt", AArch64CC::GT)
3346                     .Case("le", AArch64CC::LE)
3347                     .Case("al", AArch64CC::AL)
3348                     .Case("nv", AArch64CC::NV)
3349                     .Default(AArch64CC::Invalid);
3350 
3351   if (CC == AArch64CC::Invalid && getSTI().hasFeature(AArch64::FeatureSVE)) {
3352     CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3353                     .Case("none",  AArch64CC::EQ)
3354                     .Case("any",   AArch64CC::NE)
3355                     .Case("nlast", AArch64CC::HS)
3356                     .Case("last",  AArch64CC::LO)
3357                     .Case("first", AArch64CC::MI)
3358                     .Case("nfrst", AArch64CC::PL)
3359                     .Case("pmore", AArch64CC::HI)
3360                     .Case("plast", AArch64CC::LS)
3361                     .Case("tcont", AArch64CC::GE)
3362                     .Case("tstop", AArch64CC::LT)
3363                     .Default(AArch64CC::Invalid);
3364 
3365     if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3366       Suggestion = "nfrst";
3367   }
3368   return CC;
3369 }
3370 
3371 /// parseCondCode - Parse a Condition Code operand.
3372 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3373                                      bool invertCondCode) {
3374   SMLoc S = getLoc();
3375   const AsmToken &Tok = getTok();
3376   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3377 
3378   StringRef Cond = Tok.getString();
3379   std::string Suggestion;
3380   AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3381   if (CC == AArch64CC::Invalid) {
3382     std::string Msg = "invalid condition code";
3383     if (!Suggestion.empty())
3384       Msg += ", did you mean " + Suggestion + "?";
3385     return TokError(Msg);
3386   }
3387   Lex(); // Eat identifier token.
3388 
3389   if (invertCondCode) {
3390     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3391       return TokError("condition codes AL and NV are invalid for this instruction");
3392     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
3393   }
3394 
3395   Operands.push_back(
3396       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3397   return false;
3398 }
3399 
3400 ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3401   const AsmToken &Tok = getTok();
3402   SMLoc S = getLoc();
3403 
3404   if (Tok.isNot(AsmToken::Identifier))
3405     return TokError("invalid operand for instruction");
3406 
3407   unsigned PStateImm = -1;
3408   const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3409   if (!SVCR)
3410     return ParseStatus::NoMatch;
3411   if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3412     PStateImm = SVCR->Encoding;
3413 
3414   Operands.push_back(
3415       AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3416   Lex(); // Eat identifier token.
3417   return ParseStatus::Success;
3418 }
3419 
// Parse an SME matrix operand: either the full ZA array (optionally with an
// element-width suffix, e.g. "za.b") or a tile/row/column register such as
// "za0h.s".
ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  SMLoc S = getLoc();

  StringRef Name = Tok.getString();

  // "za" (optionally suffixed) names the whole SME array.
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    Lex(); // eat "za[.(b|h|s|d)]"
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
    if (DotPosition != StringRef::npos) {
      const auto &KindRes =
          parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
      if (!KindRes)
        return TokError(
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    }
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
        getContext()));
    if (getLexer().is(AsmToken::LBrac)) {
      // There's no comma after matrix operand, so we can parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
    return ParseStatus::Success;
  }

  // Try to parse matrix register.
  unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  if (!Reg)
    return ParseStatus::NoMatch;

  // Any matrix register matched above carries a '.' element-width suffix.
  size_t DotPosition = Name.find('.');
  assert(DotPosition != StringRef::npos && "Unexpected register");

  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  // The character just before the '.' selects row ('h'), column ('v'), or
  // a whole tile (anything else).
  StringRef RowOrColumn = Head.take_back();

  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);

  // Next up, parsing the suffix
  const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
  if (!KindRes)
    return TokError(
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;

  Lex();

  Operands.push_back(AArch64Operand::CreateMatrixRegister(
      Reg, ElementWidth, Kind, S, getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    // There's no comma after matrix operand, so we can parse the next operand
    // immediately.
    if (parseOperand(Operands, false, false))
      return ParseStatus::NoMatch;
  }
  return ParseStatus::Success;
}
3487 
/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
/// them if present.
ParseStatus
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  // Match the shift/extend mnemonic case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return ParseStatus::NoMatch;

  SMLoc S = Tok.getLoc();
  Lex();

  // The immediate may be introduced by an optional '#'.
  bool Hash = parseOptionalToken(AsmToken::Hash);

  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    // No amount follows. Shifts require one; extends default to #0.
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      return TokError("expected #imm after shift specifier");
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return ParseStatus::Success;
  }

  // Make sure we do actually have a number, identifier or a parenthesized
  // expression.
  SMLoc E = getLoc();
  if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
      !getTok().is(AsmToken::Identifier))
    return Error(E, "expected integer shift amount");

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return ParseStatus::Failure;

  // The amount must fold to a constant; symbolic shift amounts are rejected.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE)
    return Error(E, "expected constant '#imm' after shift specifier");

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return ParseStatus::Success;
}
3554 
// Table mapping architecture-extension names to the subtarget features they
// correspond to. Scanned by setRequiredFeatureString below to name the
// extension(s) a failed table lookup requires.
// NOTE(review): presumably also consulted by ".arch_extension" directive
// handling elsewhere in this file — confirm against the full source.
static const struct Extension {
  const char *Name;
  const FeatureBitset Features;
} ExtensionMap[] = {
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"rasv2", {AArch64::FeatureRASv2}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"predres2", {AArch64::FeatureSPECRES2}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},
    {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},
    {"sve2", {AArch64::FeatureSVE2}},
    {"sve2-aes", {AArch64::FeatureSVE2AES}},
    {"sve2-sm4", {AArch64::FeatureSVE2SM4}},
    {"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
    {"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},
    {"b16b16", {AArch64::FeatureB16B16}},
    {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},
    {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},
    {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},
    {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},
    {"sme2p1", {AArch64::FeatureSME2p1}},
    {"hbc", {AArch64::FeatureHBC}},
    {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},
    {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},
    {"lse128", {AArch64::FeatureLSE128}},
    {"ite", {AArch64::FeatureITE}},
    {"cssc", {AArch64::FeatureCSSC}},
    {"rcpc3", {AArch64::FeatureRCPC3}},
    {"gcs", {AArch64::FeatureGCS}},
    {"bf16", {AArch64::FeatureBF16}},
    {"compnum", {AArch64::FeatureComplxNum}},
    {"dotprod", {AArch64::FeatureDotProd}},
    {"f32mm", {AArch64::FeatureMatMulFP32}},
    {"f64mm", {AArch64::FeatureMatMulFP64}},
    {"fp16", {AArch64::FeatureFullFP16}},
    {"fp16fml", {AArch64::FeatureFP16FML}},
    {"i8mm", {AArch64::FeatureMatMulInt8}},
    {"lor", {AArch64::FeatureLOR}},
    {"profile", {AArch64::FeatureSPE}},
    // "rdma" is the name documented by binutils for the feature, but
    // binutils also accepts incomplete prefixes of features, so "rdm"
    // works too. Support both spellings here.
    {"rdm", {AArch64::FeatureRDM}},
    {"rdma", {AArch64::FeatureRDM}},
    {"sb", {AArch64::FeatureSB}},
    {"ssbs", {AArch64::FeatureSSBS}},
    {"tme", {AArch64::FeatureTME}},
};
3629 
3630 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3631   if (FBS[AArch64::HasV8_0aOps])
3632     Str += "ARMv8a";
3633   if (FBS[AArch64::HasV8_1aOps])
3634     Str += "ARMv8.1a";
3635   else if (FBS[AArch64::HasV8_2aOps])
3636     Str += "ARMv8.2a";
3637   else if (FBS[AArch64::HasV8_3aOps])
3638     Str += "ARMv8.3a";
3639   else if (FBS[AArch64::HasV8_4aOps])
3640     Str += "ARMv8.4a";
3641   else if (FBS[AArch64::HasV8_5aOps])
3642     Str += "ARMv8.5a";
3643   else if (FBS[AArch64::HasV8_6aOps])
3644     Str += "ARMv8.6a";
3645   else if (FBS[AArch64::HasV8_7aOps])
3646     Str += "ARMv8.7a";
3647   else if (FBS[AArch64::HasV8_8aOps])
3648     Str += "ARMv8.8a";
3649   else if (FBS[AArch64::HasV8_9aOps])
3650     Str += "ARMv8.9a";
3651   else if (FBS[AArch64::HasV9_0aOps])
3652     Str += "ARMv9-a";
3653   else if (FBS[AArch64::HasV9_1aOps])
3654     Str += "ARMv9.1a";
3655   else if (FBS[AArch64::HasV9_2aOps])
3656     Str += "ARMv9.2a";
3657   else if (FBS[AArch64::HasV9_3aOps])
3658     Str += "ARMv9.3a";
3659   else if (FBS[AArch64::HasV9_4aOps])
3660     Str += "ARMv9.4a";
3661   else if (FBS[AArch64::HasV8_0rOps])
3662     Str += "ARMv8r";
3663   else {
3664     SmallVector<std::string, 2> ExtMatches;
3665     for (const auto& Ext : ExtensionMap) {
3666       // Use & in case multiple features are enabled
3667       if ((FBS & Ext.Features) != FeatureBitset())
3668         ExtMatches.push_back(Ext.Name);
3669     }
3670     Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3671   }
3672 }
3673 
3674 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3675                                       SMLoc S) {
3676   const uint16_t Op2 = Encoding & 7;
3677   const uint16_t Cm = (Encoding & 0x78) >> 3;
3678   const uint16_t Cn = (Encoding & 0x780) >> 7;
3679   const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3680 
3681   const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3682 
3683   Operands.push_back(
3684       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3685   Operands.push_back(
3686       AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3687   Operands.push_back(
3688       AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3689   Expr = MCConstantExpr::create(Op2, getContext());
3690   Operands.push_back(
3691       AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3692 }
3693 
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  if (Name.contains('.'))
    return TokError("invalid operand");

  // All of these aliases are emitted as a plain "sys" instruction.
  Mnemonic = Name;
  Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));

  const AsmToken &Tok = getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  // Each alias family has its own lookup table; a hit that lacks the required
  // subtarget features produces a diagnostic naming those features.
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
    if (!IC)
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      setRequiredFeatureString(IC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
    if (!DC)
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      setRequiredFeatureString(DC->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
    if (!AT)
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      setRequiredFeatureString(AT->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
    if (!TLBI)
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
      return TokError(Str);
    }
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {

    // Prediction-restriction instructions only take the "rctx" operand.
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");

    bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);

    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
    if (!hasPredres)
      return TokError(Mnemonic.upper() + "RCTX requires: predres");

    // op2 selects the particular prediction-restriction operation.
    uint16_t PRCTX_Op2 = Mnemonic == "cfp"    ? 0b100
                         : Mnemonic == "dvp"  ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp"  ? 0b111
                                              : 0;
    assert(PRCTX_Op2 &&
           "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;

    createSysAlias(Encoding, Operands, S);
  }

  Lex(); // Eat operand.

  // Ops whose name contains "all" do not take a register operand (cf. the
  // "does not use a register" diagnostic below).
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (parseOptionalToken(AsmToken::Comma)) {
    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");
    HasRegister = true;
  }

  if (ExpectRegister && !HasRegister)
    return TokError("specified " + Mnemonic + " op requires a register");
  else if (!ExpectRegister && HasRegister)
    return TokError("specified " + Mnemonic + " op does not use a register");

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
3797 
3798 /// parseSyspAlias - The TLBIP instructions are simple aliases for
3799 /// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
3800 bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
3801                                       OperandVector &Operands) {
3802   if (Name.contains('.'))
3803     return TokError("invalid operand");
3804 
3805   Mnemonic = Name;
3806   Operands.push_back(
3807       AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
3808 
3809   const AsmToken &Tok = getTok();
3810   StringRef Op = Tok.getString();
3811   SMLoc S = Tok.getLoc();
3812 
3813   if (Mnemonic == "tlbip") {
3814     bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
3815     if (HasnXSQualifier) {
3816       Op = Op.drop_back(3);
3817     }
3818     const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
3819     if (!TLBIorig)
3820       return TokError("invalid operand for TLBIP instruction");
3821     const AArch64TLBI::TLBI TLBI(
3822         TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3823         TLBIorig->NeedsReg,
3824         HasnXSQualifier
3825             ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
3826             : TLBIorig->FeaturesRequired);
3827     if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
3828       std::string Name =
3829           std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
3830       std::string Str("TLBIP " + Name + " requires: ");
3831       setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
3832       return TokError(Str);
3833     }
3834     createSysAlias(TLBI.Encoding, Operands, S);
3835   }
3836 
3837   Lex(); // Eat operand.
3838 
3839   if (parseComma())
3840     return true;
3841 
3842   if (Tok.isNot(AsmToken::Identifier))
3843     return TokError("expected register identifier");
3844   auto Result = tryParseSyspXzrPair(Operands);
3845   if (Result.isNoMatch())
3846     Result = tryParseGPRSeqPair(Operands);
3847   if (!Result.isSuccess())
3848     return TokError("specified " + Mnemonic +
3849                     " op requires a pair of registers");
3850 
3851   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3852     return true;
3853 
3854   return false;
3855 }
3856 
// Parse the operand of a barrier instruction (DSB/DMB/ISB/TSB): either an
// immediate in [0, 15] or a named barrier option. DSB immediates above 15
// are deliberately returned as NoMatch so the nXS variant parser can try.
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();

  // TSB only accepts the named 'csync' option, never an immediate.
  if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
    return TokError("'csync' operand expected");
  if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    // Remember the integer token so it can be pushed back on NoMatch below.
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::Failure;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return Error(ExprLoc, "immediate value expected for barrier operand");
    int64_t Value = MCE->getValue();
    if (Mnemonic == "dsb" && Value > 15) {
      // This case is a no match here, but it might be matched by the nXS
      // variant. Deliberately not unlex the optional '#' as it is not necessary
      // to characterize an integer immediate.
      Parser.getLexer().UnLex(IntTok);
      return ParseStatus::NoMatch;
    }
    if (Value < 0 || Value > 15)
      return Error(ExprLoc, "barrier operand out of range");
    // A known encoding gets its canonical name; unknown ones get "".
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
                                                     ExprLoc, getContext(),
                                                     false /*hasnXSModifier*/));
    return ParseStatus::Success;
  }

  if (Tok.isNot(AsmToken::Identifier))
    return TokError("invalid operand for instruction");

  StringRef Operand = Tok.getString();
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError("'sy' or #imm operand expected");
  // The only valid named option for TSB is 'csync'
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError("'csync' operand expected");
  if (!DB && !TSB) {
    if (Mnemonic == "dsb") {
      // This case is a no match here, but it might be matched by the nXS
      // variant.
      return ParseStatus::NoMatch;
    }
    return TokError("invalid barrier option name");
  }

  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
      getContext(), false /*hasnXSModifier*/));
  Lex(); // Consume the option

  return ParseStatus::Success;
}
3918 
3919 ParseStatus
3920 AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
3921   const AsmToken &Tok = getTok();
3922 
3923   assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
3924   if (Mnemonic != "dsb")
3925     return ParseStatus::Failure;
3926 
3927   if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
3928     // Immediate operand.
3929     const MCExpr *ImmVal;
3930     SMLoc ExprLoc = getLoc();
3931     if (getParser().parseExpression(ImmVal))
3932       return ParseStatus::Failure;
3933     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3934     if (!MCE)
3935       return Error(ExprLoc, "immediate value expected for barrier operand");
3936     int64_t Value = MCE->getValue();
3937     // v8.7-A DSB in the nXS variant accepts only the following immediate
3938     // values: 16, 20, 24, 28.
3939     if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
3940       return Error(ExprLoc, "barrier operand out of range");
3941     auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
3942     Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3943                                                      ExprLoc, getContext(),
3944                                                      true /*hasnXSModifier*/));
3945     return ParseStatus::Success;
3946   }
3947 
3948   if (Tok.isNot(AsmToken::Identifier))
3949     return TokError("invalid operand for instruction");
3950 
3951   StringRef Operand = Tok.getString();
3952   auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3953 
3954   if (!DB)
3955     return TokError("invalid barrier option name");
3956 
3957   Operands.push_back(
3958       AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
3959                                     getContext(), true /*hasnXSModifier*/));
3960   Lex(); // Consume the option
3961 
3962   return ParseStatus::Success;
3963 }
3964 
3965 ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
3966   const AsmToken &Tok = getTok();
3967 
3968   if (Tok.isNot(AsmToken::Identifier))
3969     return ParseStatus::NoMatch;
3970 
3971   if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
3972     return ParseStatus::NoMatch;
3973 
3974   int MRSReg, MSRReg;
3975   auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
3976   if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3977     MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3978     MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3979   } else
3980     MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
3981 
3982   unsigned PStateImm = -1;
3983   auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
3984   if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
3985     PStateImm = PState15->Encoding;
3986   if (!PState15) {
3987     auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
3988     if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
3989       PStateImm = PState1->Encoding;
3990   }
3991 
3992   Operands.push_back(
3993       AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
3994                                    PStateImm, getContext()));
3995   Lex(); // Eat identifier
3996 
3997   return ParseStatus::Success;
3998 }
3999 
4000 /// tryParseNeonVectorRegister - Parse a vector register operand.
4001 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4002   if (getTok().isNot(AsmToken::Identifier))
4003     return true;
4004 
4005   SMLoc S = getLoc();
4006   // Check for a vector register specifier first.
4007   StringRef Kind;
4008   MCRegister Reg;
4009   ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4010   if (!Res.isSuccess())
4011     return true;
4012 
4013   const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4014   if (!KindRes)
4015     return true;
4016 
4017   unsigned ElementWidth = KindRes->second;
4018   Operands.push_back(
4019       AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4020                                       S, getLoc(), getContext()));
4021 
4022   // If there was an explicit qualifier, that goes on as a literal text
4023   // operand.
4024   if (!Kind.empty())
4025     Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4026 
4027   return tryParseVectorIndex(Operands).isFailure();
4028 }
4029 
4030 ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4031   SMLoc SIdx = getLoc();
4032   if (parseOptionalToken(AsmToken::LBrac)) {
4033     const MCExpr *ImmVal;
4034     if (getParser().parseExpression(ImmVal))
4035       return ParseStatus::NoMatch;
4036     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4037     if (!MCE)
4038       return TokError("immediate value expected for vector index");
4039 
4040     SMLoc E = getLoc();
4041 
4042     if (parseToken(AsmToken::RBrac, "']' expected"))
4043       return ParseStatus::Failure;
4044 
4045     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4046                                                          E, getContext()));
4047     return ParseStatus::Success;
4048   }
4049 
4050   return ParseStatus::NoMatch;
4051 }
4052 
4053 // tryParseVectorRegister - Try to parse a vector register name with
4054 // optional kind specifier. If it is a register specifier, eat the token
4055 // and return it.
4056 ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4057                                                      StringRef &Kind,
4058                                                      RegKind MatchKind) {
4059   const AsmToken &Tok = getTok();
4060 
4061   if (Tok.isNot(AsmToken::Identifier))
4062     return ParseStatus::NoMatch;
4063 
4064   StringRef Name = Tok.getString();
4065   // If there is a kind specifier, it's separated from the register name by
4066   // a '.'.
4067   size_t Start = 0, Next = Name.find('.');
4068   StringRef Head = Name.slice(Start, Next);
4069   unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4070 
4071   if (RegNum) {
4072     if (Next != StringRef::npos) {
4073       Kind = Name.slice(Next, StringRef::npos);
4074       if (!isValidVectorKind(Kind, MatchKind))
4075         return TokError("invalid vector kind qualifier");
4076     }
4077     Lex(); // Eat the register token.
4078 
4079     Reg = RegNum;
4080     return ParseStatus::Success;
4081   }
4082 
4083   return ParseStatus::NoMatch;
4084 }
4085 
/// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
///
/// Handles both plain predicate registers (RK == SVEPredicateVector) and
/// predicate-as-counter registers (RK == SVEPredicateAsCounter), an optional
/// vector index, and an optional "/z" or "/m" predication suffix, which is
/// pushed as separate "/" and "z"/"m" tokens.
template <RegKind RK>
ParseStatus
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  // Check for a SVE predicate register specifier first.
  const SMLoc S = getLoc();
  StringRef Kind;
  MCRegister RegNum;
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  if (!Res.isSuccess())
    return Res;

  // An empty Kind is valid too (parseVectorKind handles it); NoMatch here
  // means the suffix did not name a legal element type for this RegKind.
  const auto &KindRes = parseVectorKind(Kind, RK);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
      getLoc(), getContext()));

  if (getLexer().is(AsmToken::LBrac)) {
    if (RK == RegKind::SVEPredicateAsCounter) {
      ParseStatus ResIndex = tryParseVectorIndex(Operands);
      if (ResIndex.isSuccess())
        return ParseStatus::Success;
    } else {
      // Indexed predicate, there's no comma so try parse the next operand
      // immediately.
      if (parseOperand(Operands, false, false))
        return ParseStatus::NoMatch;
    }
  }

  // Not all predicates are followed by a '/m' or '/z'.
  if (getTok().isNot(AsmToken::Slash))
    return ParseStatus::Success;

  // But when they do they shouldn't have an element type suffix.
  if (!Kind.empty())
    return Error(S, "not expecting size suffix");

  // Add a literal slash as operand
  Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));

  Lex(); // Eat the slash.

  // Zeroing or merging?
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(getLoc(), "expecting 'z' predication");

  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(getLoc(), "expecting 'm' or 'z' predication");

  // Add zero/merge token.
  const char *ZM = Pred == "z" ? "z" : "m";
  Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));

  Lex(); // Eat zero/merge token.
  return ParseStatus::Success;
}
4148 
4149 /// parseRegister - Parse a register operand.
4150 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4151   // Try for a Neon vector register.
4152   if (!tryParseNeonVectorRegister(Operands))
4153     return false;
4154 
4155   if (tryParseZTOperand(Operands).isSuccess())
4156     return false;
4157 
4158   // Otherwise try for a scalar register.
4159   if (tryParseGPROperand<false>(Operands).isSuccess())
4160     return false;
4161 
4162   return true;
4163 }
4164 
/// Parse an immediate expression with an optional leading relocation
/// specifier of the form ":<specifier>:<expr>" (e.g. ":lo12:sym").
/// On success ImmVal holds the parsed expression, wrapped in an
/// AArch64MCExpr carrying the relocation kind when a specifier was present.
/// Returns true on error.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (parseOptionalToken(AsmToken::Colon)) {
    HasELFModifier = true;

    if (getTok().isNot(AsmToken::Identifier))
      return TokError("expect relocation specifier in operand after ':'");

    // Specifiers are matched case-insensitively.
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("prel_g3", AArch64MCExpr::VK_PREL_G3)
                  .Case("prel_g2", AArch64MCExpr::VK_PREL_G2)
                  .Case("prel_g2_nc", AArch64MCExpr::VK_PREL_G2_NC)
                  .Case("prel_g1", AArch64MCExpr::VK_PREL_G1)
                  .Case("prel_g1_nc", AArch64MCExpr::VK_PREL_G1_NC)
                  .Case("prel_g0", AArch64MCExpr::VK_PREL_G0)
                  .Case("prel_g0_nc", AArch64MCExpr::VK_PREL_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("pg_hi21_nc", AArch64MCExpr::VK_ABS_PAGE_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("gotpage_lo15", AArch64MCExpr::VK_GOT_PAGE_LO15)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
                  .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID)
      return TokError("expect relocation specifier in operand after ':'");

    Lex(); // Eat identifier

    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
      return true;
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Attach the relocation kind to the parsed expression.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());

  return false;
}
4242 
/// Parse an SME matrix tile list operand of the form "{}", "{za}", or
/// "{za0.d, za1.d, ...}". The list is encoded as a register mask over the
/// ZAD0..ZAD7 tiles; wider tiles are expanded to the ZAD registers they
/// alias via ComputeRegsForAlias.
ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  if (getTok().isNot(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Parses one "za<N>.<size>" tile; fills in Reg and ElementWidth.
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    if (DotPosition == StringRef::npos)
      return ParseStatus::NoMatch;

    unsigned RegNum = matchMatrixTileListRegName(Name);
    if (!RegNum)
      return ParseStatus::NoMatch;

    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
        parseVectorKind(Tail, RegKind::Matrix);
    if (!KindRes)
      return TokError(
          "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
    Reg = RegNum;
    Lex(); // Eat the register.
    return ParseStatus::Success;
  };

  SMLoc S = getLoc();
  // Remember the '{' so it can be pushed back on NoMatch below.
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  // Empty matrix list
  if (parseOptionalToken(AsmToken::RCurly)) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  // Try parse {za} alias early
  if (getTok().getString().equals_insensitive("za")) {
    Lex(); // Eat 'za'

    if (parseToken(AsmToken::RCurly, "'}' expected"))
      return ParseStatus::Failure;

    // {za} selects all eight ZAD tiles.
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
        /*RegMask=*/0xFF, S, getLoc(), getContext()));
    return ParseStatus::Success;
  }

  SMLoc TileLoc = getLoc();

  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    // Give back the '{' so other list-operand parsers can try.
    getLexer().UnLex(LCurly);
    return ParseRes;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();

  unsigned PrevReg = FirstReg;

  // DRegs accumulates the ZAD registers aliased by every listed tile.
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);

  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);

  while (parseOptionalToken(AsmToken::Comma)) {
    TileLoc = getLoc();
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Element size must match on all regs in the list.
    if (ElementWidth != NextElementWidth)
      return Error(TileLoc, "mismatched register size suffix");

    // Out-of-order and duplicate tiles are accepted with a warning only.
    if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
      Warning(TileLoc, "tile list not in ascending order");

    if (SeenRegs.contains(Reg))
      Warning(TileLoc, "duplicate tile in list");
    else {
      SeenRegs.insert(Reg);
      AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
    }

    PrevReg = Reg;
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  // Build the bitmask: bit i corresponds to ZAD<i>.
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
    RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
                       RI->getEncodingValue(AArch64::ZAD0));
  Operands.push_back(
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));

  return ParseStatus::Success;
}
4348 
/// Parse a vector register list operand, e.g. "{v0.8b-v3.8b}" or
/// "{z0.d, z4.d, z8.d}". Supports both range syntax ('-') and comma
/// syntax; comma lists may use a non-unit stride (for SME multi-vector
/// operands), with wraparound at the last register of the class.
/// When ExpectMatch is true, an identifier that fails to parse as a
/// vector register is a hard error rather than NoMatch.
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
                                                 bool ExpectMatch) {
  MCAsmParser &Parser = getParser();
  if (!getTok().is(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  // Wrapper around parse function
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes.isSuccess()) {
      if (parseVectorKind(Kind, VectorKind))
        return ParseRes;
      llvm_unreachable("Expected a valid vector kind");
    }

    // "zt0" belongs to the lookup-table operand parser; report NoMatch so
    // it can take over.
    if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
        RegTok.getString().equals_insensitive("zt0"))
      return ParseStatus::NoMatch;

    // "za"-prefixed names are matrix operands, also handled elsewhere.
    if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive("za")))
      return Error(Loc, "vector register expected");

    return ParseStatus::NoMatch;
  };

  int NumRegs = getNumRegsForRegKind(VectorKind);
  SMLoc S = getLoc();
  auto LCurly = getTok();
  Lex(); // Eat left bracket token.

  StringRef Kind;
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);

  // Put back the original left bracket if there was no match, so that
  // different types of list-operands can be matched (e.g. SVE, Neon).
  if (ParseRes.isNoMatch())
    Parser.getLexer().UnLex(LCurly);

  if (!ParseRes.isSuccess())
    return ParseRes;

  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  int Stride = 1;
  if (parseOptionalToken(AsmToken::Minus)) {
    // Range syntax: "{vN.T-vM.T}".
    SMLoc Loc = getLoc();
    StringRef NextKind;

    MCRegister Reg;
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
      return ParseRes;

    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Distance from first to last, modulo the register count (the range
    // may wrap around, e.g. {v31.8b-v2.8b}).
    unsigned Space =
        (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + NumRegs - PrevReg);

    if (Space == 0 || Space > 3)
      return Error(Loc, "invalid number of vectors");

    Count += Space;
  }
  else {
    // Comma syntax: the stride is fixed by the first pair of registers
    // and every subsequent register must follow it.
    bool HasCalculatedStride = false;
    while (parseOptionalToken(AsmToken::Comma)) {
      SMLoc Loc = getLoc();
      StringRef NextKind;
      MCRegister Reg;
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
        return ParseRes;

      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (RegVal + NumRegs - PrevRegVal);
        HasCalculatedStride = true;
      }

      // Register must be incremental (with a wraparound at last register).
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(Loc, "registers must have the same sequential stride");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (parseToken(AsmToken::RCurly, "'}' expected"))
    return ParseStatus::Failure;

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    if (const auto &VK = parseVectorKind(Kind, VectorKind))
      std::tie(NumElements, ElementWidth) = *VK;
  }

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
      getLoc(), getContext()));

  return ParseStatus::Success;
}
4472 
4473 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4474 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4475   auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4476   if (!ParseRes.isSuccess())
4477     return true;
4478 
4479   return tryParseVectorIndex(Operands).isFailure();
4480 }
4481 
4482 ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4483   SMLoc StartLoc = getLoc();
4484 
4485   MCRegister RegNum;
4486   ParseStatus Res = tryParseScalarRegister(RegNum);
4487   if (!Res.isSuccess())
4488     return Res;
4489 
4490   if (!parseOptionalToken(AsmToken::Comma)) {
4491     Operands.push_back(AArch64Operand::CreateReg(
4492         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4493     return ParseStatus::Success;
4494   }
4495 
4496   parseOptionalToken(AsmToken::Hash);
4497 
4498   if (getTok().isNot(AsmToken::Integer))
4499     return Error(getLoc(), "index must be absent or #0");
4500 
4501   const MCExpr *ImmVal;
4502   if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4503       cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4504     return Error(getLoc(), "index must be absent or #0");
4505 
4506   Operands.push_back(AArch64Operand::CreateReg(
4507       RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4508   return ParseStatus::Success;
4509 }
4510 
/// Parse an SME2 lookup-table register operand ("zt0"), optionally followed
/// by an immediate index in square brackets. The register and the index are
/// pushed as separate operands.
ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  // Match case-insensitively against the lookup-table register names.
  std::string Name = Tok.getString().lower();

  unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);

  if (RegNum == 0)
    return ParseStatus::NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
  Lex(); // Eat identifier token.

  // Check if register is followed by an index
  if (parseOptionalToken(AsmToken::LBrac)) {
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return ParseStatus::NoMatch;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");
    if (parseToken(AsmToken::RBrac, "']' expected"))
      return ParseStatus::Failure;

    // Push the index as a plain immediate operand.
    Operands.push_back(AArch64Operand::CreateImm(
        MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
        getLoc(), getContext()));
  }

  return ParseStatus::Success;
}
4543 
4544 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4545 ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4546   SMLoc StartLoc = getLoc();
4547 
4548   MCRegister RegNum;
4549   ParseStatus Res = tryParseScalarRegister(RegNum);
4550   if (!Res.isSuccess())
4551     return Res;
4552 
4553   // No shift/extend is the default.
4554   if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4555     Operands.push_back(AArch64Operand::CreateReg(
4556         RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4557     return ParseStatus::Success;
4558   }
4559 
4560   // Eat the comma
4561   Lex();
4562 
4563   // Match the shift
4564   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4565   Res = tryParseOptionalShiftExtend(ExtOpnd);
4566   if (!Res.isSuccess())
4567     return Res;
4568 
4569   auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4570   Operands.push_back(AArch64Operand::CreateReg(
4571       RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4572       Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4573       Ext->hasShiftExtendAmount()));
4574 
4575   return ParseStatus::Success;
4576 }
4577 
/// Parse the optional SVE "mul vl" / "mul #<imm>" decoration.
/// Pushes the "mul" token plus either a "vl" token or an immediate operand.
/// Returns false when the decoration was consumed, true otherwise (either
/// no "mul" was present, or an error was emitted).
bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Some SVE instructions have a decoration after the immediate, i.e.
  // "mul vl". We parse them here and add tokens, which must be present in the
  // asm string in the tablegen instruction.
  // Peek past "mul" before committing, so nothing is consumed when the
  // decoration is absent.
  bool NextIsVL =
      Parser.getLexer().peekTok().getString().equals_insensitive("vl");
  bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
  if (!getTok().getString().equals_insensitive("mul") ||
      !(NextIsVL || NextIsHash))
    return true;

  Operands.push_back(
      AArch64Operand::CreateToken("mul", getLoc(), getContext()));
  Lex(); // Eat the "mul"

  if (NextIsVL) {
    Operands.push_back(
        AArch64Operand::CreateToken("vl", getLoc(), getContext()));
    Lex(); // Eat the "vl"
    return false;
  }

  if (NextIsHash) {
    Lex(); // Eat the #
    SMLoc S = getLoc();

    // Parse immediate operand.
    const MCExpr *ImmVal;
    if (!Parser.parseExpression(ImmVal))
      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
            getContext()));
        return false;
      }
  }

  // "mul" was consumed but no valid suffix followed.
  return Error(getLoc(), "expected 'vl' or '#<imm>'");
}
4619 
4620 bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4621                                               StringRef &VecGroup) {
4622   MCAsmParser &Parser = getParser();
4623   auto Tok = Parser.getTok();
4624   if (Tok.isNot(AsmToken::Identifier))
4625     return true;
4626 
4627   StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4628                      .Case("vgx2", "vgx2")
4629                      .Case("vgx4", "vgx4")
4630                      .Default("");
4631 
4632   if (VG.empty())
4633     return true;
4634 
4635   VecGroup = VG;
4636   Parser.Lex(); // Eat vgx[2|4]
4637   return false;
4638 }
4639 
4640 bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4641   auto Tok = getTok();
4642   if (Tok.isNot(AsmToken::Identifier))
4643     return true;
4644 
4645   auto Keyword = Tok.getString();
4646   Keyword = StringSwitch<StringRef>(Keyword.lower())
4647                 .Case("sm", "sm")
4648                 .Case("za", "za")
4649                 .Default(Keyword);
4650   Operands.push_back(
4651       AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4652 
4653   Lex();
4654   return false;
4655 }
4656 
/// parseOperand - Parse a arm instruction operand.  For now this parses the
/// operand regardless of the mnemonic.
///
/// First gives the tablegen-generated custom operand parsers a chance, then
/// dispatches on the leading token. Handles '['/'{' prefixed operands by
/// recursing, identifiers (condition codes, registers, "mul vl", shift/extend,
/// keywords, labels), immediates (including the literal "#0.0" for fcmp), and
/// the "ldr reg, =value" pseudo. Returns true on error.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  MCAsmParser &Parser = getParser();

  ParseStatus ResTy =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocated) immediate
    // expression.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    Operands.push_back(
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly: {
    // Try a Neon vector list first; otherwise push '{' and recurse (e.g.
    // for matrix/SVE list operands handled elsewhere).
    if (!parseNeonVectorList(Operands))
      return false;

    Operands.push_back(
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    Lex(); // Eat '{'

    // There's no comma after a '{', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::Identifier: {
    // See if this is a "VG" decoration used by SME instructions.
    StringRef VecGroup;
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
      Operands.push_back(
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return false;
    }
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // See if this is a "mul vl" decoration or "mul #<int>" operand used
    // by SVE instructions.
    if (!parseOptionalMulOperand(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    ParseStatus GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (!GotShift.isNoMatch())
      return GotShift.isFailure();

    // If this is a two-word mnemonic, parse its special keyword
    // operand as an identifier.
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
        Mnemonic == "gcsb")
      return parseKeywordOperand(Operands);

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();

    parseOptionalToken(AsmToken::Hash);

    // Parse a negative sign
    bool isNegative = false;
    if (getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Lex(); // Eat the token.

      // Push "#0" and ".0" as the raw tokens the matcher expects.
      Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
      Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return TokError("unexpected token in operand");
    Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");

    bool IsXReg =
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    if (isa<MCConstantExpr>(SubExprVal)) {
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Strip trailing 16-bit zero chunks; ShiftAmt records how far the
      // remaining value was shifted down.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        // Rewrite the mnemonic to "movz imm[, lsl #shift]".
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
      APInt Simm = APInt(64, Imm << ShiftAmt);
      // check if the immediate is an unsigned or signed 32-bit int for W regs
      if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
        return Error(Loc, "Immediate too large for register");
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
4848 
4849 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4850   const MCExpr *Expr = nullptr;
4851   SMLoc L = getLoc();
4852   if (check(getParser().parseExpression(Expr), L, "expected expression"))
4853     return true;
4854   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4855   if (check(!Value, L, "expected constant expression"))
4856     return true;
4857   Out = Value->getValue();
4858   return false;
4859 }
4860 
4861 bool AArch64AsmParser::parseComma() {
4862   if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
4863     return true;
4864   // Eat the comma
4865   Lex();
4866   return false;
4867 }
4868 
/// Parse a register and check it lies in [First, Last]; Out receives its
/// offset from Base. FP and LR are accepted (as offsets 29/30 from X0) even
/// though they are not contiguous with X0..X28 in the register enum.
/// Emits a diagnostic and returns true on failure.
bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
                                            unsigned First, unsigned Last) {
  MCRegister Reg;
  SMLoc Start, End;
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
    return true;

  // Special handling for FP and LR; they aren't linearly after x28 in
  // the registers enum.
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      // Range "x0..fp": clamp the enum range check to X28 and map FP to 29.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      }
    }
    if (Last == AArch64::LR) {
      // Range "x0..lr": FP (29) and LR (30) are both inside it.
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
        Out = 29;
        return false;
      } else if (Reg == AArch64::LR) {
        Out = 30;
        return false;
      }
    }
  }

  // Note the diagnostic names the caller's [First, Last] bounds, while the
  // actual check uses the possibly-clamped RangeEnd.
  if (check(Reg < First || Reg > RangeEnd, Start,
            Twine("expected register in range ") +
                AArch64InstPrinter::getRegisterName(First) + " to " +
                AArch64InstPrinter::getRegisterName(Last)))
    return true;
  Out = Reg - Base;
  return false;
}
4907 
4908 bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
4909                                     const MCParsedAsmOperand &Op2) const {
4910   auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
4911   auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
4912 
4913   if (AOp1.isVectorList() && AOp2.isVectorList())
4914     return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
4915            AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
4916            AOp1.getVectorListStride() == AOp2.getVectorListStride();
4917 
4918   if (!AOp1.isReg() || !AOp2.isReg())
4919     return false;
4920 
4921   if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4922       AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4923     return MCTargetAsmParser::areEqualRegs(Op1, Op2);
4924 
4925   assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4926          "Testing equality of non-scalar registers not supported");
4927 
4928   // Check if a registers match their sub/super register classes.
4929   if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4930     return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
4931   if (AOp1.getRegEqualityTy() == EqualsSubReg)
4932     return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
4933   if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4934     return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
4935   if (AOp2.getRegEqualityTy() == EqualsSubReg)
4936     return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
4937 
4938   return false;
4939 }
4940 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Pushes the mnemonic token(s) and parsed operands onto
/// \p Operands; returns true on error (diagnostic already emitted).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the legacy "b<cond>" spellings to the architectural
  // "b.<cond>" form so the rest of the parser only sees one spelling.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (getTok().is(AsmToken::Identifier) &&
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
  // the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
    return parseSysAlias(Head, NameLoc, Operands);

  // TLBIP instructions are aliases for the SYSP instruction.
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);

  Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic: the part after the first
  // '.' is parsed as a condition-code operand rather than a plain suffix.
  if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point the diagnostic at the suffix within the original mnemonic text.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    std::string Suggestion;
    AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
    if (CC == AArch64CC::Invalid) {
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
    }
    Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
                                                   /*IsSuffix=*/true));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic. Each ".suffix" piece becomes
  // its own suffix token operand.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(AArch64Operand::CreateToken(
        Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {

    unsigned N = 1;
    do {
      // Parse and remember the operand. The flags tell parseOperand whether
      // operand N is a condition code (and whether it must be inverted).
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        return true;
      }

      // After successfully parsing some operands there are three special cases
      // to consider (i.e. notional operands not separated by commas). Two are
      // due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // And a further case is '}', which ends a group of tokens specifying the
      // SME accumulator array 'ZA' or tile vector, i.e.
      //
      //   '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!

      if (parseOptionalToken(AsmToken::RBrac))
        Operands.push_back(
            AArch64Operand::CreateToken("]", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::Exclaim))
        Operands.push_back(
            AArch64Operand::CreateToken("!", getLoc(), getContext()));
      if (parseOptionalToken(AsmToken::RCurly))
        Operands.push_back(
            AArch64Operand::CreateToken("}", getLoc(), getContext()));

      ++N;
    } while (parseOptionalToken(AsmToken::Comma));
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  return false;
}
5089 
5090 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
5091   assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5092   return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5093          (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5094          (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5095          (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5096          (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5097          (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5098 }
5099 
5100 // FIXME: This entire function is a giant hack to provide us with decent
5101 // operand range validation/diagnostics until TableGen/MC can be extended
5102 // to support autogeneration of this kind of validation.
5103 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5104                                            SmallVectorImpl<SMLoc> &Loc) {
5105   const MCRegisterInfo *RI = getContext().getRegisterInfo();
5106   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5107 
5108   // A prefix only applies to the instruction following it.  Here we extract
5109   // prefix information for the next instruction before validating the current
5110   // one so that in the case of failure we don't erronously continue using the
5111   // current prefix.
5112   PrefixInfo Prefix = NextPrefix;
5113   NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5114 
5115   // Before validating the instruction in isolation we run through the rules
5116   // applicable when it follows a prefix instruction.
5117   // NOTE: brk & hlt can be prefixed but require no additional validation.
5118   if (Prefix.isActive() &&
5119       (Inst.getOpcode() != AArch64::BRK) &&
5120       (Inst.getOpcode() != AArch64::HLT)) {
5121 
5122     // Prefixed intructions must have a destructive operand.
5123     if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5124         AArch64::NotDestructive)
5125       return Error(IDLoc, "instruction is unpredictable when following a"
5126                    " movprfx, suggest replacing movprfx with mov");
5127 
5128     // Destination operands must match.
5129     if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5130       return Error(Loc[0], "instruction is unpredictable when following a"
5131                    " movprfx writing to a different destination");
5132 
5133     // Destination operand must not be used in any other location.
5134     for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5135       if (Inst.getOperand(i).isReg() &&
5136           (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5137           isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5138         return Error(Loc[0], "instruction is unpredictable when following a"
5139                      " movprfx and destination also used as non-destructive"
5140                      " source");
5141     }
5142 
5143     auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5144     if (Prefix.isPredicated()) {
5145       int PgIdx = -1;
5146 
5147       // Find the instructions general predicate.
5148       for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5149         if (Inst.getOperand(i).isReg() &&
5150             PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5151           PgIdx = i;
5152           break;
5153         }
5154 
5155       // Instruction must be predicated if the movprfx is predicated.
5156       if (PgIdx == -1 ||
5157           (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5158         return Error(IDLoc, "instruction is unpredictable when following a"
5159                      " predicated movprfx, suggest using unpredicated movprfx");
5160 
5161       // Instruction must use same general predicate as the movprfx.
5162       if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5163         return Error(IDLoc, "instruction is unpredictable when following a"
5164                      " predicated movprfx using a different general predicate");
5165 
5166       // Instruction element type must match the movprfx.
5167       if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5168         return Error(IDLoc, "instruction is unpredictable when following a"
5169                      " predicated movprfx with a different element size");
5170     }
5171   }
5172 
5173   // Check for indexed addressing modes w/ the base register being the
5174   // same as a destination/source register or pair load where
5175   // the Rt == Rt2. All of those are undefined behaviour.
5176   switch (Inst.getOpcode()) {
5177   case AArch64::LDPSWpre:
5178   case AArch64::LDPWpost:
5179   case AArch64::LDPWpre:
5180   case AArch64::LDPXpost:
5181   case AArch64::LDPXpre: {
5182     unsigned Rt = Inst.getOperand(1).getReg();
5183     unsigned Rt2 = Inst.getOperand(2).getReg();
5184     unsigned Rn = Inst.getOperand(3).getReg();
5185     if (RI->isSubRegisterEq(Rn, Rt))
5186       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5187                            "is also a destination");
5188     if (RI->isSubRegisterEq(Rn, Rt2))
5189       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5190                            "is also a destination");
5191     [[fallthrough]];
5192   }
5193   case AArch64::LDR_ZA:
5194   case AArch64::STR_ZA: {
5195     if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5196         Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5197       return Error(Loc[1],
5198                    "unpredictable instruction, immediate and offset mismatch.");
5199     break;
5200   }
5201   case AArch64::LDPDi:
5202   case AArch64::LDPQi:
5203   case AArch64::LDPSi:
5204   case AArch64::LDPSWi:
5205   case AArch64::LDPWi:
5206   case AArch64::LDPXi: {
5207     unsigned Rt = Inst.getOperand(0).getReg();
5208     unsigned Rt2 = Inst.getOperand(1).getReg();
5209     if (Rt == Rt2)
5210       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5211     break;
5212   }
5213   case AArch64::LDPDpost:
5214   case AArch64::LDPDpre:
5215   case AArch64::LDPQpost:
5216   case AArch64::LDPQpre:
5217   case AArch64::LDPSpost:
5218   case AArch64::LDPSpre:
5219   case AArch64::LDPSWpost: {
5220     unsigned Rt = Inst.getOperand(1).getReg();
5221     unsigned Rt2 = Inst.getOperand(2).getReg();
5222     if (Rt == Rt2)
5223       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5224     break;
5225   }
5226   case AArch64::STPDpost:
5227   case AArch64::STPDpre:
5228   case AArch64::STPQpost:
5229   case AArch64::STPQpre:
5230   case AArch64::STPSpost:
5231   case AArch64::STPSpre:
5232   case AArch64::STPWpost:
5233   case AArch64::STPWpre:
5234   case AArch64::STPXpost:
5235   case AArch64::STPXpre: {
5236     unsigned Rt = Inst.getOperand(1).getReg();
5237     unsigned Rt2 = Inst.getOperand(2).getReg();
5238     unsigned Rn = Inst.getOperand(3).getReg();
5239     if (RI->isSubRegisterEq(Rn, Rt))
5240       return Error(Loc[0], "unpredictable STP instruction, writeback base "
5241                            "is also a source");
5242     if (RI->isSubRegisterEq(Rn, Rt2))
5243       return Error(Loc[1], "unpredictable STP instruction, writeback base "
5244                            "is also a source");
5245     break;
5246   }
5247   case AArch64::LDRBBpre:
5248   case AArch64::LDRBpre:
5249   case AArch64::LDRHHpre:
5250   case AArch64::LDRHpre:
5251   case AArch64::LDRSBWpre:
5252   case AArch64::LDRSBXpre:
5253   case AArch64::LDRSHWpre:
5254   case AArch64::LDRSHXpre:
5255   case AArch64::LDRSWpre:
5256   case AArch64::LDRWpre:
5257   case AArch64::LDRXpre:
5258   case AArch64::LDRBBpost:
5259   case AArch64::LDRBpost:
5260   case AArch64::LDRHHpost:
5261   case AArch64::LDRHpost:
5262   case AArch64::LDRSBWpost:
5263   case AArch64::LDRSBXpost:
5264   case AArch64::LDRSHWpost:
5265   case AArch64::LDRSHXpost:
5266   case AArch64::LDRSWpost:
5267   case AArch64::LDRWpost:
5268   case AArch64::LDRXpost: {
5269     unsigned Rt = Inst.getOperand(1).getReg();
5270     unsigned Rn = Inst.getOperand(2).getReg();
5271     if (RI->isSubRegisterEq(Rn, Rt))
5272       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5273                            "is also a source");
5274     break;
5275   }
5276   case AArch64::STRBBpost:
5277   case AArch64::STRBpost:
5278   case AArch64::STRHHpost:
5279   case AArch64::STRHpost:
5280   case AArch64::STRWpost:
5281   case AArch64::STRXpost:
5282   case AArch64::STRBBpre:
5283   case AArch64::STRBpre:
5284   case AArch64::STRHHpre:
5285   case AArch64::STRHpre:
5286   case AArch64::STRWpre:
5287   case AArch64::STRXpre: {
5288     unsigned Rt = Inst.getOperand(1).getReg();
5289     unsigned Rn = Inst.getOperand(2).getReg();
5290     if (RI->isSubRegisterEq(Rn, Rt))
5291       return Error(Loc[0], "unpredictable STR instruction, writeback base "
5292                            "is also a source");
5293     break;
5294   }
5295   case AArch64::STXRB:
5296   case AArch64::STXRH:
5297   case AArch64::STXRW:
5298   case AArch64::STXRX:
5299   case AArch64::STLXRB:
5300   case AArch64::STLXRH:
5301   case AArch64::STLXRW:
5302   case AArch64::STLXRX: {
5303     unsigned Rs = Inst.getOperand(0).getReg();
5304     unsigned Rt = Inst.getOperand(1).getReg();
5305     unsigned Rn = Inst.getOperand(2).getReg();
5306     if (RI->isSubRegisterEq(Rt, Rs) ||
5307         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5308       return Error(Loc[0],
5309                    "unpredictable STXR instruction, status is also a source");
5310     break;
5311   }
5312   case AArch64::STXPW:
5313   case AArch64::STXPX:
5314   case AArch64::STLXPW:
5315   case AArch64::STLXPX: {
5316     unsigned Rs = Inst.getOperand(0).getReg();
5317     unsigned Rt1 = Inst.getOperand(1).getReg();
5318     unsigned Rt2 = Inst.getOperand(2).getReg();
5319     unsigned Rn = Inst.getOperand(3).getReg();
5320     if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5321         (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5322       return Error(Loc[0],
5323                    "unpredictable STXP instruction, status is also a source");
5324     break;
5325   }
5326   case AArch64::LDRABwriteback:
5327   case AArch64::LDRAAwriteback: {
5328     unsigned Xt = Inst.getOperand(0).getReg();
5329     unsigned Xn = Inst.getOperand(1).getReg();
5330     if (Xt == Xn)
5331       return Error(Loc[0],
5332           "unpredictable LDRA instruction, writeback base"
5333           " is also a destination");
5334     break;
5335   }
5336   }
5337 
5338   // Check v8.8-A memops instructions.
5339   switch (Inst.getOpcode()) {
5340   case AArch64::CPYFP:
5341   case AArch64::CPYFPWN:
5342   case AArch64::CPYFPRN:
5343   case AArch64::CPYFPN:
5344   case AArch64::CPYFPWT:
5345   case AArch64::CPYFPWTWN:
5346   case AArch64::CPYFPWTRN:
5347   case AArch64::CPYFPWTN:
5348   case AArch64::CPYFPRT:
5349   case AArch64::CPYFPRTWN:
5350   case AArch64::CPYFPRTRN:
5351   case AArch64::CPYFPRTN:
5352   case AArch64::CPYFPT:
5353   case AArch64::CPYFPTWN:
5354   case AArch64::CPYFPTRN:
5355   case AArch64::CPYFPTN:
5356   case AArch64::CPYFM:
5357   case AArch64::CPYFMWN:
5358   case AArch64::CPYFMRN:
5359   case AArch64::CPYFMN:
5360   case AArch64::CPYFMWT:
5361   case AArch64::CPYFMWTWN:
5362   case AArch64::CPYFMWTRN:
5363   case AArch64::CPYFMWTN:
5364   case AArch64::CPYFMRT:
5365   case AArch64::CPYFMRTWN:
5366   case AArch64::CPYFMRTRN:
5367   case AArch64::CPYFMRTN:
5368   case AArch64::CPYFMT:
5369   case AArch64::CPYFMTWN:
5370   case AArch64::CPYFMTRN:
5371   case AArch64::CPYFMTN:
5372   case AArch64::CPYFE:
5373   case AArch64::CPYFEWN:
5374   case AArch64::CPYFERN:
5375   case AArch64::CPYFEN:
5376   case AArch64::CPYFEWT:
5377   case AArch64::CPYFEWTWN:
5378   case AArch64::CPYFEWTRN:
5379   case AArch64::CPYFEWTN:
5380   case AArch64::CPYFERT:
5381   case AArch64::CPYFERTWN:
5382   case AArch64::CPYFERTRN:
5383   case AArch64::CPYFERTN:
5384   case AArch64::CPYFET:
5385   case AArch64::CPYFETWN:
5386   case AArch64::CPYFETRN:
5387   case AArch64::CPYFETN:
5388   case AArch64::CPYP:
5389   case AArch64::CPYPWN:
5390   case AArch64::CPYPRN:
5391   case AArch64::CPYPN:
5392   case AArch64::CPYPWT:
5393   case AArch64::CPYPWTWN:
5394   case AArch64::CPYPWTRN:
5395   case AArch64::CPYPWTN:
5396   case AArch64::CPYPRT:
5397   case AArch64::CPYPRTWN:
5398   case AArch64::CPYPRTRN:
5399   case AArch64::CPYPRTN:
5400   case AArch64::CPYPT:
5401   case AArch64::CPYPTWN:
5402   case AArch64::CPYPTRN:
5403   case AArch64::CPYPTN:
5404   case AArch64::CPYM:
5405   case AArch64::CPYMWN:
5406   case AArch64::CPYMRN:
5407   case AArch64::CPYMN:
5408   case AArch64::CPYMWT:
5409   case AArch64::CPYMWTWN:
5410   case AArch64::CPYMWTRN:
5411   case AArch64::CPYMWTN:
5412   case AArch64::CPYMRT:
5413   case AArch64::CPYMRTWN:
5414   case AArch64::CPYMRTRN:
5415   case AArch64::CPYMRTN:
5416   case AArch64::CPYMT:
5417   case AArch64::CPYMTWN:
5418   case AArch64::CPYMTRN:
5419   case AArch64::CPYMTN:
5420   case AArch64::CPYE:
5421   case AArch64::CPYEWN:
5422   case AArch64::CPYERN:
5423   case AArch64::CPYEN:
5424   case AArch64::CPYEWT:
5425   case AArch64::CPYEWTWN:
5426   case AArch64::CPYEWTRN:
5427   case AArch64::CPYEWTN:
5428   case AArch64::CPYERT:
5429   case AArch64::CPYERTWN:
5430   case AArch64::CPYERTRN:
5431   case AArch64::CPYERTN:
5432   case AArch64::CPYET:
5433   case AArch64::CPYETWN:
5434   case AArch64::CPYETRN:
5435   case AArch64::CPYETN: {
5436     unsigned Xd_wb = Inst.getOperand(0).getReg();
5437     unsigned Xs_wb = Inst.getOperand(1).getReg();
5438     unsigned Xn_wb = Inst.getOperand(2).getReg();
5439     unsigned Xd = Inst.getOperand(3).getReg();
5440     unsigned Xs = Inst.getOperand(4).getReg();
5441     unsigned Xn = Inst.getOperand(5).getReg();
5442     if (Xd_wb != Xd)
5443       return Error(Loc[0],
5444                    "invalid CPY instruction, Xd_wb and Xd do not match");
5445     if (Xs_wb != Xs)
5446       return Error(Loc[0],
5447                    "invalid CPY instruction, Xs_wb and Xs do not match");
5448     if (Xn_wb != Xn)
5449       return Error(Loc[0],
5450                    "invalid CPY instruction, Xn_wb and Xn do not match");
5451     if (Xd == Xs)
5452       return Error(Loc[0], "invalid CPY instruction, destination and source"
5453                            " registers are the same");
5454     if (Xd == Xn)
5455       return Error(Loc[0], "invalid CPY instruction, destination and size"
5456                            " registers are the same");
5457     if (Xs == Xn)
5458       return Error(Loc[0], "invalid CPY instruction, source and size"
5459                            " registers are the same");
5460     break;
5461   }
5462   case AArch64::SETP:
5463   case AArch64::SETPT:
5464   case AArch64::SETPN:
5465   case AArch64::SETPTN:
5466   case AArch64::SETM:
5467   case AArch64::SETMT:
5468   case AArch64::SETMN:
5469   case AArch64::SETMTN:
5470   case AArch64::SETE:
5471   case AArch64::SETET:
5472   case AArch64::SETEN:
5473   case AArch64::SETETN:
5474   case AArch64::SETGP:
5475   case AArch64::SETGPT:
5476   case AArch64::SETGPN:
5477   case AArch64::SETGPTN:
5478   case AArch64::SETGM:
5479   case AArch64::SETGMT:
5480   case AArch64::SETGMN:
5481   case AArch64::SETGMTN:
5482   case AArch64::MOPSSETGE:
5483   case AArch64::MOPSSETGET:
5484   case AArch64::MOPSSETGEN:
5485   case AArch64::MOPSSETGETN: {
5486     unsigned Xd_wb = Inst.getOperand(0).getReg();
5487     unsigned Xn_wb = Inst.getOperand(1).getReg();
5488     unsigned Xd = Inst.getOperand(2).getReg();
5489     unsigned Xn = Inst.getOperand(3).getReg();
5490     unsigned Xm = Inst.getOperand(4).getReg();
5491     if (Xd_wb != Xd)
5492       return Error(Loc[0],
5493                    "invalid SET instruction, Xd_wb and Xd do not match");
5494     if (Xn_wb != Xn)
5495       return Error(Loc[0],
5496                    "invalid SET instruction, Xn_wb and Xn do not match");
5497     if (Xd == Xn)
5498       return Error(Loc[0], "invalid SET instruction, destination and size"
5499                            " registers are the same");
5500     if (Xd == Xm)
5501       return Error(Loc[0], "invalid SET instruction, destination and source"
5502                            " registers are the same");
5503     if (Xn == Xm)
5504       return Error(Loc[0], "invalid SET instruction, source and size"
5505                            " registers are the same");
5506     break;
5507   }
5508   }
5509 
5510   // Now check immediate ranges. Separate from the above as there is overlap
5511   // in the instructions being checked and this keeps the nested conditionals
5512   // to a minimum.
5513   switch (Inst.getOpcode()) {
5514   case AArch64::ADDSWri:
5515   case AArch64::ADDSXri:
5516   case AArch64::ADDWri:
5517   case AArch64::ADDXri:
5518   case AArch64::SUBSWri:
5519   case AArch64::SUBSXri:
5520   case AArch64::SUBWri:
5521   case AArch64::SUBXri: {
5522     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5523     // some slight duplication here.
5524     if (Inst.getOperand(2).isExpr()) {
5525       const MCExpr *Expr = Inst.getOperand(2).getExpr();
5526       AArch64MCExpr::VariantKind ELFRefKind;
5527       MCSymbolRefExpr::VariantKind DarwinRefKind;
5528       int64_t Addend;
5529       if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5530 
5531         // Only allow these with ADDXri.
5532         if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
5533              DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
5534             Inst.getOpcode() == AArch64::ADDXri)
5535           return false;
5536 
5537         // Only allow these with ADDXri/ADDWri
5538         if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
5539              ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
5540              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
5541              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
5542              ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
5543              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
5544              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
5545              ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
5546              ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
5547              ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
5548             (Inst.getOpcode() == AArch64::ADDXri ||
5549              Inst.getOpcode() == AArch64::ADDWri))
5550           return false;
5551 
5552         // Don't allow symbol refs in the immediate field otherwise
5553         // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5554         // operands of the original instruction (i.e. 'add w0, w1, borked' vs
5555         // 'cmp w0, 'borked')
5556         return Error(Loc.back(), "invalid immediate expression");
5557       }
5558       // We don't validate more complex expressions here
5559     }
5560     return false;
5561   }
5562   default:
5563     return false;
5564   }
5565 }
5566 
5567 static std::string AArch64MnemonicSpellCheck(StringRef S,
5568                                              const FeatureBitset &FBS,
5569                                              unsigned VariantID = 0);
5570 
5571 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5572                                       uint64_t ErrorInfo,
5573                                       OperandVector &Operands) {
5574   switch (ErrCode) {
5575   case Match_InvalidTiedOperand: {
5576     auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5577     if (Op.isVectorList())
5578       return Error(Loc, "operand must match destination register list");
5579 
5580     assert(Op.isReg() && "Unexpected operand type");
5581     switch (Op.getRegEqualityTy()) {
5582     case RegConstraintEqualityTy::EqualsSubReg:
5583       return Error(Loc, "operand must be 64-bit form of destination register");
5584     case RegConstraintEqualityTy::EqualsSuperReg:
5585       return Error(Loc, "operand must be 32-bit form of destination register");
5586     case RegConstraintEqualityTy::EqualsReg:
5587       return Error(Loc, "operand must match destination register");
5588     }
5589     llvm_unreachable("Unknown RegConstraintEqualityTy");
5590   }
5591   case Match_MissingFeature:
5592     return Error(Loc,
5593                  "instruction requires a CPU feature not currently enabled");
5594   case Match_InvalidOperand:
5595     return Error(Loc, "invalid operand for instruction");
5596   case Match_InvalidSuffix:
5597     return Error(Loc, "invalid type suffix for instruction");
5598   case Match_InvalidCondCode:
5599     return Error(Loc, "expected AArch64 condition code");
5600   case Match_AddSubRegExtendSmall:
5601     return Error(Loc,
5602       "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5603   case Match_AddSubRegExtendLarge:
5604     return Error(Loc,
5605       "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5606   case Match_AddSubSecondSource:
5607     return Error(Loc,
5608       "expected compatible register, symbol or integer in range [0, 4095]");
5609   case Match_LogicalSecondSource:
5610     return Error(Loc, "expected compatible register or logical immediate");
5611   case Match_InvalidMovImm32Shift:
5612     return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5613   case Match_InvalidMovImm64Shift:
5614     return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5615   case Match_AddSubRegShift32:
5616     return Error(Loc,
5617        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5618   case Match_AddSubRegShift64:
5619     return Error(Loc,
5620        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5621   case Match_InvalidFPImm:
5622     return Error(Loc,
5623                  "expected compatible register or floating-point constant");
5624   case Match_InvalidMemoryIndexedSImm6:
5625     return Error(Loc, "index must be an integer in range [-32, 31].");
5626   case Match_InvalidMemoryIndexedSImm5:
5627     return Error(Loc, "index must be an integer in range [-16, 15].");
5628   case Match_InvalidMemoryIndexed1SImm4:
5629     return Error(Loc, "index must be an integer in range [-8, 7].");
5630   case Match_InvalidMemoryIndexed2SImm4:
5631     return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5632   case Match_InvalidMemoryIndexed3SImm4:
5633     return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5634   case Match_InvalidMemoryIndexed4SImm4:
5635     return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5636   case Match_InvalidMemoryIndexed16SImm4:
5637     return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5638   case Match_InvalidMemoryIndexed32SImm4:
5639     return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5640   case Match_InvalidMemoryIndexed1SImm6:
5641     return Error(Loc, "index must be an integer in range [-32, 31].");
5642   case Match_InvalidMemoryIndexedSImm8:
5643     return Error(Loc, "index must be an integer in range [-128, 127].");
5644   case Match_InvalidMemoryIndexedSImm9:
5645     return Error(Loc, "index must be an integer in range [-256, 255].");
5646   case Match_InvalidMemoryIndexed16SImm9:
5647     return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5648   case Match_InvalidMemoryIndexed8SImm10:
5649     return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5650   case Match_InvalidMemoryIndexed4SImm7:
5651     return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5652   case Match_InvalidMemoryIndexed8SImm7:
5653     return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5654   case Match_InvalidMemoryIndexed16SImm7:
5655     return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5656   case Match_InvalidMemoryIndexed8UImm5:
5657     return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5658   case Match_InvalidMemoryIndexed8UImm3:
5659     return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5660   case Match_InvalidMemoryIndexed4UImm5:
5661     return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5662   case Match_InvalidMemoryIndexed2UImm5:
5663     return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5664   case Match_InvalidMemoryIndexed8UImm6:
5665     return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5666   case Match_InvalidMemoryIndexed16UImm6:
5667     return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5668   case Match_InvalidMemoryIndexed4UImm6:
5669     return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5670   case Match_InvalidMemoryIndexed2UImm6:
5671     return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5672   case Match_InvalidMemoryIndexed1UImm6:
5673     return Error(Loc, "index must be in range [0, 63].");
5674   case Match_InvalidMemoryWExtend8:
5675     return Error(Loc,
5676                  "expected 'uxtw' or 'sxtw' with optional shift of #0");
5677   case Match_InvalidMemoryWExtend16:
5678     return Error(Loc,
5679                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5680   case Match_InvalidMemoryWExtend32:
5681     return Error(Loc,
5682                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5683   case Match_InvalidMemoryWExtend64:
5684     return Error(Loc,
5685                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5686   case Match_InvalidMemoryWExtend128:
5687     return Error(Loc,
5688                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
5689   case Match_InvalidMemoryXExtend8:
5690     return Error(Loc,
5691                  "expected 'lsl' or 'sxtx' with optional shift of #0");
5692   case Match_InvalidMemoryXExtend16:
5693     return Error(Loc,
5694                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
5695   case Match_InvalidMemoryXExtend32:
5696     return Error(Loc,
5697                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
5698   case Match_InvalidMemoryXExtend64:
5699     return Error(Loc,
5700                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
5701   case Match_InvalidMemoryXExtend128:
5702     return Error(Loc,
5703                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
5704   case Match_InvalidMemoryIndexed1:
5705     return Error(Loc, "index must be an integer in range [0, 4095].");
5706   case Match_InvalidMemoryIndexed2:
5707     return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
5708   case Match_InvalidMemoryIndexed4:
5709     return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
5710   case Match_InvalidMemoryIndexed8:
5711     return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
5712   case Match_InvalidMemoryIndexed16:
5713     return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
5714   case Match_InvalidImm0_0:
5715     return Error(Loc, "immediate must be 0.");
5716   case Match_InvalidImm0_1:
5717     return Error(Loc, "immediate must be an integer in range [0, 1].");
5718   case Match_InvalidImm0_3:
5719     return Error(Loc, "immediate must be an integer in range [0, 3].");
5720   case Match_InvalidImm0_7:
5721     return Error(Loc, "immediate must be an integer in range [0, 7].");
5722   case Match_InvalidImm0_15:
5723     return Error(Loc, "immediate must be an integer in range [0, 15].");
5724   case Match_InvalidImm0_31:
5725     return Error(Loc, "immediate must be an integer in range [0, 31].");
5726   case Match_InvalidImm0_63:
5727     return Error(Loc, "immediate must be an integer in range [0, 63].");
5728   case Match_InvalidImm0_127:
5729     return Error(Loc, "immediate must be an integer in range [0, 127].");
5730   case Match_InvalidImm0_255:
5731     return Error(Loc, "immediate must be an integer in range [0, 255].");
5732   case Match_InvalidImm0_65535:
5733     return Error(Loc, "immediate must be an integer in range [0, 65535].");
5734   case Match_InvalidImm1_8:
5735     return Error(Loc, "immediate must be an integer in range [1, 8].");
5736   case Match_InvalidImm1_16:
5737     return Error(Loc, "immediate must be an integer in range [1, 16].");
5738   case Match_InvalidImm1_32:
5739     return Error(Loc, "immediate must be an integer in range [1, 32].");
5740   case Match_InvalidImm1_64:
5741     return Error(Loc, "immediate must be an integer in range [1, 64].");
5742   case Match_InvalidMemoryIndexedRange2UImm0:
5743     return Error(Loc, "vector select offset must be the immediate range 0:1.");
5744   case Match_InvalidMemoryIndexedRange2UImm1:
5745     return Error(Loc, "vector select offset must be an immediate range of the "
5746                       "form <immf>:<imml>, where the first "
5747                       "immediate is a multiple of 2 in the range [0, 2], and "
5748                       "the second immediate is immf + 1.");
5749   case Match_InvalidMemoryIndexedRange2UImm2:
5750   case Match_InvalidMemoryIndexedRange2UImm3:
5751     return Error(
5752         Loc,
5753         "vector select offset must be an immediate range of the form "
5754         "<immf>:<imml>, "
5755         "where the first immediate is a multiple of 2 in the range [0, 6] or "
5756         "[0, 14] "
5757         "depending on the instruction, and the second immediate is immf + 1.");
5758   case Match_InvalidMemoryIndexedRange4UImm0:
5759     return Error(Loc, "vector select offset must be the immediate range 0:3.");
5760   case Match_InvalidMemoryIndexedRange4UImm1:
5761   case Match_InvalidMemoryIndexedRange4UImm2:
5762     return Error(
5763         Loc,
5764         "vector select offset must be an immediate range of the form "
5765         "<immf>:<imml>, "
5766         "where the first immediate is a multiple of 4 in the range [0, 4] or "
5767         "[0, 12] "
5768         "depending on the instruction, and the second immediate is immf + 3.");
5769   case Match_InvalidSVEAddSubImm8:
5770     return Error(Loc, "immediate must be an integer in range [0, 255]"
5771                       " with a shift amount of 0");
5772   case Match_InvalidSVEAddSubImm16:
5773   case Match_InvalidSVEAddSubImm32:
5774   case Match_InvalidSVEAddSubImm64:
5775     return Error(Loc, "immediate must be an integer in range [0, 255] or a "
5776                       "multiple of 256 in range [256, 65280]");
5777   case Match_InvalidSVECpyImm8:
5778     return Error(Loc, "immediate must be an integer in range [-128, 255]"
5779                       " with a shift amount of 0");
5780   case Match_InvalidSVECpyImm16:
5781     return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5782                       "multiple of 256 in range [-32768, 65280]");
5783   case Match_InvalidSVECpyImm32:
5784   case Match_InvalidSVECpyImm64:
5785     return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
5786                       "multiple of 256 in range [-32768, 32512]");
5787   case Match_InvalidIndexRange0_0:
5788     return Error(Loc, "expected lane specifier '[0]'");
5789   case Match_InvalidIndexRange1_1:
5790     return Error(Loc, "expected lane specifier '[1]'");
5791   case Match_InvalidIndexRange0_15:
5792     return Error(Loc, "vector lane must be an integer in range [0, 15].");
5793   case Match_InvalidIndexRange0_7:
5794     return Error(Loc, "vector lane must be an integer in range [0, 7].");
5795   case Match_InvalidIndexRange0_3:
5796     return Error(Loc, "vector lane must be an integer in range [0, 3].");
5797   case Match_InvalidIndexRange0_1:
5798     return Error(Loc, "vector lane must be an integer in range [0, 1].");
5799   case Match_InvalidSVEIndexRange0_63:
5800     return Error(Loc, "vector lane must be an integer in range [0, 63].");
5801   case Match_InvalidSVEIndexRange0_31:
5802     return Error(Loc, "vector lane must be an integer in range [0, 31].");
5803   case Match_InvalidSVEIndexRange0_15:
5804     return Error(Loc, "vector lane must be an integer in range [0, 15].");
5805   case Match_InvalidSVEIndexRange0_7:
5806     return Error(Loc, "vector lane must be an integer in range [0, 7].");
5807   case Match_InvalidSVEIndexRange0_3:
5808     return Error(Loc, "vector lane must be an integer in range [0, 3].");
5809   case Match_InvalidLabel:
5810     return Error(Loc, "expected label or encodable integer pc offset");
5811   case Match_MRS:
5812     return Error(Loc, "expected readable system register");
5813   case Match_MSR:
5814   case Match_InvalidSVCR:
5815     return Error(Loc, "expected writable system register or pstate");
5816   case Match_InvalidComplexRotationEven:
5817     return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
5818   case Match_InvalidComplexRotationOdd:
5819     return Error(Loc, "complex rotation must be 90 or 270.");
5820   case Match_MnemonicFail: {
5821     std::string Suggestion = AArch64MnemonicSpellCheck(
5822         ((AArch64Operand &)*Operands[0]).getToken(),
5823         ComputeAvailableFeatures(STI->getFeatureBits()));
5824     return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
5825   }
5826   case Match_InvalidGPR64shifted8:
5827     return Error(Loc, "register must be x0..x30 or xzr, without shift");
5828   case Match_InvalidGPR64shifted16:
5829     return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
5830   case Match_InvalidGPR64shifted32:
5831     return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
5832   case Match_InvalidGPR64shifted64:
5833     return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
5834   case Match_InvalidGPR64shifted128:
5835     return Error(
5836         Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
5837   case Match_InvalidGPR64NoXZRshifted8:
5838     return Error(Loc, "register must be x0..x30 without shift");
5839   case Match_InvalidGPR64NoXZRshifted16:
5840     return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
5841   case Match_InvalidGPR64NoXZRshifted32:
5842     return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
5843   case Match_InvalidGPR64NoXZRshifted64:
5844     return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
5845   case Match_InvalidGPR64NoXZRshifted128:
5846     return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
5847   case Match_InvalidZPR32UXTW8:
5848   case Match_InvalidZPR32SXTW8:
5849     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
5850   case Match_InvalidZPR32UXTW16:
5851   case Match_InvalidZPR32SXTW16:
5852     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
5853   case Match_InvalidZPR32UXTW32:
5854   case Match_InvalidZPR32SXTW32:
5855     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
5856   case Match_InvalidZPR32UXTW64:
5857   case Match_InvalidZPR32SXTW64:
5858     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
5859   case Match_InvalidZPR64UXTW8:
5860   case Match_InvalidZPR64SXTW8:
5861     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
5862   case Match_InvalidZPR64UXTW16:
5863   case Match_InvalidZPR64SXTW16:
5864     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
5865   case Match_InvalidZPR64UXTW32:
5866   case Match_InvalidZPR64SXTW32:
5867     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
5868   case Match_InvalidZPR64UXTW64:
5869   case Match_InvalidZPR64SXTW64:
5870     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
5871   case Match_InvalidZPR32LSL8:
5872     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
5873   case Match_InvalidZPR32LSL16:
5874     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
5875   case Match_InvalidZPR32LSL32:
5876     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
5877   case Match_InvalidZPR32LSL64:
5878     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
5879   case Match_InvalidZPR64LSL8:
5880     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
5881   case Match_InvalidZPR64LSL16:
5882     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
5883   case Match_InvalidZPR64LSL32:
5884     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
5885   case Match_InvalidZPR64LSL64:
5886     return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
5887   case Match_InvalidZPR0:
5888     return Error(Loc, "expected register without element width suffix");
5889   case Match_InvalidZPR8:
5890   case Match_InvalidZPR16:
5891   case Match_InvalidZPR32:
5892   case Match_InvalidZPR64:
5893   case Match_InvalidZPR128:
5894     return Error(Loc, "invalid element width");
5895   case Match_InvalidZPR_3b8:
5896     return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
5897   case Match_InvalidZPR_3b16:
5898     return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
5899   case Match_InvalidZPR_3b32:
5900     return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
5901   case Match_InvalidZPR_4b8:
5902     return Error(Loc,
5903                  "Invalid restricted vector register, expected z0.b..z15.b");
5904   case Match_InvalidZPR_4b16:
5905     return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
5906   case Match_InvalidZPR_4b32:
5907     return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
5908   case Match_InvalidZPR_4b64:
5909     return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
5910   case Match_InvalidSVEPattern:
5911     return Error(Loc, "invalid predicate pattern");
5912   case Match_InvalidSVEPredicateAnyReg:
5913   case Match_InvalidSVEPredicateBReg:
5914   case Match_InvalidSVEPredicateHReg:
5915   case Match_InvalidSVEPredicateSReg:
5916   case Match_InvalidSVEPredicateDReg:
5917     return Error(Loc, "invalid predicate register.");
5918   case Match_InvalidSVEPredicate3bAnyReg:
5919     return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
5920   case Match_InvalidSVEPNPredicateB_p8to15Reg:
5921   case Match_InvalidSVEPNPredicateH_p8to15Reg:
5922   case Match_InvalidSVEPNPredicateS_p8to15Reg:
5923   case Match_InvalidSVEPNPredicateD_p8to15Reg:
5924     return Error(Loc, "Invalid predicate register, expected PN in range "
5925                       "pn8..pn15 with element suffix.");
5926   case Match_InvalidSVEPNPredicateAny_p8to15Reg:
5927     return Error(Loc, "invalid restricted predicate-as-counter register "
5928                       "expected pn8..pn15");
5929   case Match_InvalidSVEPNPredicateBReg:
5930   case Match_InvalidSVEPNPredicateHReg:
5931   case Match_InvalidSVEPNPredicateSReg:
5932   case Match_InvalidSVEPNPredicateDReg:
5933     return Error(Loc, "Invalid predicate register, expected PN in range "
5934                       "pn0..pn15 with element suffix.");
5935   case Match_InvalidSVEVecLenSpecifier:
5936     return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
5937   case Match_InvalidSVEPredicateListMul2x8:
5938   case Match_InvalidSVEPredicateListMul2x16:
5939   case Match_InvalidSVEPredicateListMul2x32:
5940   case Match_InvalidSVEPredicateListMul2x64:
5941     return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
5942                       "predicate registers, where the first vector is a multiple of 2 "
5943                       "and with correct element type");
5944   case Match_InvalidSVEExactFPImmOperandHalfOne:
5945     return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
5946   case Match_InvalidSVEExactFPImmOperandHalfTwo:
5947     return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
5948   case Match_InvalidSVEExactFPImmOperandZeroOne:
5949     return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
5950   case Match_InvalidMatrixTileVectorH8:
5951   case Match_InvalidMatrixTileVectorV8:
5952     return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
5953   case Match_InvalidMatrixTileVectorH16:
5954   case Match_InvalidMatrixTileVectorV16:
5955     return Error(Loc,
5956                  "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
5957   case Match_InvalidMatrixTileVectorH32:
5958   case Match_InvalidMatrixTileVectorV32:
5959     return Error(Loc,
5960                  "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
5961   case Match_InvalidMatrixTileVectorH64:
5962   case Match_InvalidMatrixTileVectorV64:
5963     return Error(Loc,
5964                  "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
5965   case Match_InvalidMatrixTileVectorH128:
5966   case Match_InvalidMatrixTileVectorV128:
5967     return Error(Loc,
5968                  "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
5969   case Match_InvalidMatrixTile32:
5970     return Error(Loc, "invalid matrix operand, expected za[0-3].s");
5971   case Match_InvalidMatrixTile64:
5972     return Error(Loc, "invalid matrix operand, expected za[0-7].d");
5973   case Match_InvalidMatrix:
5974     return Error(Loc, "invalid matrix operand, expected za");
5975   case Match_InvalidMatrix8:
5976     return Error(Loc, "invalid matrix operand, expected suffix .b");
5977   case Match_InvalidMatrix16:
5978     return Error(Loc, "invalid matrix operand, expected suffix .h");
5979   case Match_InvalidMatrix32:
5980     return Error(Loc, "invalid matrix operand, expected suffix .s");
5981   case Match_InvalidMatrix64:
5982     return Error(Loc, "invalid matrix operand, expected suffix .d");
5983   case Match_InvalidMatrixIndexGPR32_12_15:
5984     return Error(Loc, "operand must be a register in range [w12, w15]");
5985   case Match_InvalidMatrixIndexGPR32_8_11:
5986     return Error(Loc, "operand must be a register in range [w8, w11]");
5987   case Match_InvalidSVEVectorListMul2x8:
5988   case Match_InvalidSVEVectorListMul2x16:
5989   case Match_InvalidSVEVectorListMul2x32:
5990   case Match_InvalidSVEVectorListMul2x64:
5991     return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
5992                       "SVE vectors, where the first vector is a multiple of 2 "
5993                       "and with matching element types");
5994   case Match_InvalidSVEVectorListMul4x8:
5995   case Match_InvalidSVEVectorListMul4x16:
5996   case Match_InvalidSVEVectorListMul4x32:
5997   case Match_InvalidSVEVectorListMul4x64:
5998     return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
5999                       "SVE vectors, where the first vector is a multiple of 4 "
6000                       "and with matching element types");
6001   case Match_InvalidLookupTable:
6002     return Error(Loc, "Invalid lookup table, expected zt0");
6003   case Match_InvalidSVEVectorListStrided2x8:
6004   case Match_InvalidSVEVectorListStrided2x16:
6005   case Match_InvalidSVEVectorListStrided2x32:
6006   case Match_InvalidSVEVectorListStrided2x64:
6007     return Error(
6008         Loc,
6009         "Invalid vector list, expected list with each SVE vector in the list "
6010         "8 registers apart, and the first register in the range [z0, z7] or "
6011         "[z16, z23] and with correct element type");
6012   case Match_InvalidSVEVectorListStrided4x8:
6013   case Match_InvalidSVEVectorListStrided4x16:
6014   case Match_InvalidSVEVectorListStrided4x32:
6015   case Match_InvalidSVEVectorListStrided4x64:
6016     return Error(
6017         Loc,
6018         "Invalid vector list, expected list with each SVE vector in the list "
6019         "4 registers apart, and the first register in the range [z0, z3] or "
6020         "[z16, z19] and with correct element type");
6021   default:
6022     llvm_unreachable("unexpected error code!");
6023   }
6024 }
6025 
// Returns a printable name for a single subtarget feature bit, used when
// diagnosing "instruction requires: <feature>" match failures.
// NOTE(review): forward declaration only — the definition is presumably the
// TableGen-generated one pulled in later in this file; confirm against the
// GET_SUBTARGET_FEATURE_NAME include.
static const char *getSubtargetFeatureName(uint64_t Val);
6027 
6028 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6029                                                OperandVector &Operands,
6030                                                MCStreamer &Out,
6031                                                uint64_t &ErrorInfo,
6032                                                bool MatchingInlineAsm) {
6033   assert(!Operands.empty() && "Unexpect empty operand list!");
6034   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6035   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6036 
6037   StringRef Tok = Op.getToken();
6038   unsigned NumOperands = Operands.size();
6039 
6040   if (NumOperands == 4 && Tok == "lsl") {
6041     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6042     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6043     if (Op2.isScalarReg() && Op3.isImm()) {
6044       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6045       if (Op3CE) {
6046         uint64_t Op3Val = Op3CE->getValue();
6047         uint64_t NewOp3Val = 0;
6048         uint64_t NewOp4Val = 0;
6049         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6050                 Op2.getReg())) {
6051           NewOp3Val = (32 - Op3Val) & 0x1f;
6052           NewOp4Val = 31 - Op3Val;
6053         } else {
6054           NewOp3Val = (64 - Op3Val) & 0x3f;
6055           NewOp4Val = 63 - Op3Val;
6056         }
6057 
6058         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6059         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6060 
6061         Operands[0] =
6062             AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6063         Operands.push_back(AArch64Operand::CreateImm(
6064             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6065         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6066                                                 Op3.getEndLoc(), getContext());
6067       }
6068     }
6069   } else if (NumOperands == 4 && Tok == "bfc") {
6070     // FIXME: Horrible hack to handle BFC->BFM alias.
6071     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6072     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6073     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6074 
6075     if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6076       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6077       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6078 
6079       if (LSBCE && WidthCE) {
6080         uint64_t LSB = LSBCE->getValue();
6081         uint64_t Width = WidthCE->getValue();
6082 
6083         uint64_t RegWidth = 0;
6084         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6085                 Op1.getReg()))
6086           RegWidth = 64;
6087         else
6088           RegWidth = 32;
6089 
6090         if (LSB >= RegWidth)
6091           return Error(LSBOp.getStartLoc(),
6092                        "expected integer in range [0, 31]");
6093         if (Width < 1 || Width > RegWidth)
6094           return Error(WidthOp.getStartLoc(),
6095                        "expected integer in range [1, 32]");
6096 
6097         uint64_t ImmR = 0;
6098         if (RegWidth == 32)
6099           ImmR = (32 - LSB) & 0x1f;
6100         else
6101           ImmR = (64 - LSB) & 0x3f;
6102 
6103         uint64_t ImmS = Width - 1;
6104 
6105         if (ImmR != 0 && ImmS >= ImmR)
6106           return Error(WidthOp.getStartLoc(),
6107                        "requested insert overflows register");
6108 
6109         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6110         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6111         Operands[0] =
6112             AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6113         Operands[2] = AArch64Operand::CreateReg(
6114             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6115             SMLoc(), SMLoc(), getContext());
6116         Operands[3] = AArch64Operand::CreateImm(
6117             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6118         Operands.emplace_back(
6119             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6120                                       WidthOp.getEndLoc(), getContext()));
6121       }
6122     }
6123   } else if (NumOperands == 5) {
6124     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6125     // UBFIZ -> UBFM aliases.
6126     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6127       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6128       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6129       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6130 
6131       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6132         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6133         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6134 
6135         if (Op3CE && Op4CE) {
6136           uint64_t Op3Val = Op3CE->getValue();
6137           uint64_t Op4Val = Op4CE->getValue();
6138 
6139           uint64_t RegWidth = 0;
6140           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6141                   Op1.getReg()))
6142             RegWidth = 64;
6143           else
6144             RegWidth = 32;
6145 
6146           if (Op3Val >= RegWidth)
6147             return Error(Op3.getStartLoc(),
6148                          "expected integer in range [0, 31]");
6149           if (Op4Val < 1 || Op4Val > RegWidth)
6150             return Error(Op4.getStartLoc(),
6151                          "expected integer in range [1, 32]");
6152 
6153           uint64_t NewOp3Val = 0;
6154           if (RegWidth == 32)
6155             NewOp3Val = (32 - Op3Val) & 0x1f;
6156           else
6157             NewOp3Val = (64 - Op3Val) & 0x3f;
6158 
6159           uint64_t NewOp4Val = Op4Val - 1;
6160 
6161           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6162             return Error(Op4.getStartLoc(),
6163                          "requested insert overflows register");
6164 
6165           const MCExpr *NewOp3 =
6166               MCConstantExpr::create(NewOp3Val, getContext());
6167           const MCExpr *NewOp4 =
6168               MCConstantExpr::create(NewOp4Val, getContext());
6169           Operands[3] = AArch64Operand::CreateImm(
6170               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6171           Operands[4] = AArch64Operand::CreateImm(
6172               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6173           if (Tok == "bfi")
6174             Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6175                                                       getContext());
6176           else if (Tok == "sbfiz")
6177             Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6178                                                       getContext());
6179           else if (Tok == "ubfiz")
6180             Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6181                                                       getContext());
6182           else
6183             llvm_unreachable("No valid mnemonic for alias?");
6184         }
6185       }
6186 
6187       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6188       // UBFX -> UBFM aliases.
6189     } else if (NumOperands == 5 &&
6190                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6191       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6192       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6193       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6194 
6195       if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6196         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6197         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6198 
6199         if (Op3CE && Op4CE) {
6200           uint64_t Op3Val = Op3CE->getValue();
6201           uint64_t Op4Val = Op4CE->getValue();
6202 
6203           uint64_t RegWidth = 0;
6204           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6205                   Op1.getReg()))
6206             RegWidth = 64;
6207           else
6208             RegWidth = 32;
6209 
6210           if (Op3Val >= RegWidth)
6211             return Error(Op3.getStartLoc(),
6212                          "expected integer in range [0, 31]");
6213           if (Op4Val < 1 || Op4Val > RegWidth)
6214             return Error(Op4.getStartLoc(),
6215                          "expected integer in range [1, 32]");
6216 
6217           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6218 
6219           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6220             return Error(Op4.getStartLoc(),
6221                          "requested extract overflows register");
6222 
6223           const MCExpr *NewOp4 =
6224               MCConstantExpr::create(NewOp4Val, getContext());
6225           Operands[4] = AArch64Operand::CreateImm(
6226               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6227           if (Tok == "bfxil")
6228             Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6229                                                       getContext());
6230           else if (Tok == "sbfx")
6231             Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6232                                                       getContext());
6233           else if (Tok == "ubfx")
6234             Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6235                                                       getContext());
6236           else
6237             llvm_unreachable("No valid mnemonic for alias?");
6238         }
6239       }
6240     }
6241   }
6242 
6243   // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6244   // instruction for FP registers correctly in some rare circumstances. Convert
6245   // it to a safe instruction and warn (because silently changing someone's
6246   // assembly is rude).
6247   if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6248       NumOperands == 4 && Tok == "movi") {
6249     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6250     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6251     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6252     if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6253         (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6254       StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6255       if (Suffix.lower() == ".2d" &&
6256           cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6257         Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6258                 " correctly on this CPU, converting to equivalent movi.16b");
6259         // Switch the suffix to .16b.
6260         unsigned Idx = Op1.isToken() ? 1 : 2;
6261         Operands[Idx] =
6262             AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6263       }
6264     }
6265   }
6266 
6267   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6268   //        InstAlias can't quite handle this since the reg classes aren't
6269   //        subclasses.
6270   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6271     // The source register can be Wn here, but the matcher expects a
6272     // GPR64. Twiddle it here if necessary.
6273     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6274     if (Op.isScalarReg()) {
6275       unsigned Reg = getXRegFromWReg(Op.getReg());
6276       Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6277                                               Op.getStartLoc(), Op.getEndLoc(),
6278                                               getContext());
6279     }
6280   }
6281   // FIXME: Likewise for sxt[bh] with a Xd dst operand
6282   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6283     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6284     if (Op.isScalarReg() &&
6285         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6286             Op.getReg())) {
6287       // The source register can be Wn here, but the matcher expects a
6288       // GPR64. Twiddle it here if necessary.
6289       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6290       if (Op.isScalarReg()) {
6291         unsigned Reg = getXRegFromWReg(Op.getReg());
6292         Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6293                                                 Op.getStartLoc(),
6294                                                 Op.getEndLoc(), getContext());
6295       }
6296     }
6297   }
6298   // FIXME: Likewise for uxt[bh] with a Xd dst operand
6299   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6300     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6301     if (Op.isScalarReg() &&
6302         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6303             Op.getReg())) {
6304       // The source register can be Wn here, but the matcher expects a
6305       // GPR32. Twiddle it here if necessary.
6306       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6307       if (Op.isScalarReg()) {
6308         unsigned Reg = getWRegFromXReg(Op.getReg());
6309         Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6310                                                 Op.getStartLoc(),
6311                                                 Op.getEndLoc(), getContext());
6312       }
6313     }
6314   }
6315 
6316   MCInst Inst;
6317   FeatureBitset MissingFeatures;
6318   // First try to match against the secondary set of tables containing the
6319   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6320   unsigned MatchResult =
6321       MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6322                            MatchingInlineAsm, 1);
6323 
6324   // If that fails, try against the alternate table containing long-form NEON:
6325   // "fadd v0.2s, v1.2s, v2.2s"
6326   if (MatchResult != Match_Success) {
6327     // But first, save the short-form match result: we can use it in case the
6328     // long-form match also fails.
6329     auto ShortFormNEONErrorInfo = ErrorInfo;
6330     auto ShortFormNEONMatchResult = MatchResult;
6331     auto ShortFormNEONMissingFeatures = MissingFeatures;
6332 
6333     MatchResult =
6334         MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6335                              MatchingInlineAsm, 0);
6336 
6337     // Now, both matches failed, and the long-form match failed on the mnemonic
6338     // suffix token operand.  The short-form match failure is probably more
6339     // relevant: use it instead.
6340     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6341         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6342         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6343       MatchResult = ShortFormNEONMatchResult;
6344       ErrorInfo = ShortFormNEONErrorInfo;
6345       MissingFeatures = ShortFormNEONMissingFeatures;
6346     }
6347   }
6348 
6349   switch (MatchResult) {
6350   case Match_Success: {
6351     // Perform range checking and other semantic validations
6352     SmallVector<SMLoc, 8> OperandLocs;
6353     NumOperands = Operands.size();
6354     for (unsigned i = 1; i < NumOperands; ++i)
6355       OperandLocs.push_back(Operands[i]->getStartLoc());
6356     if (validateInstruction(Inst, IDLoc, OperandLocs))
6357       return true;
6358 
6359     Inst.setLoc(IDLoc);
6360     Out.emitInstruction(Inst, getSTI());
6361     return false;
6362   }
6363   case Match_MissingFeature: {
6364     assert(MissingFeatures.any() && "Unknown missing feature!");
6365     // Special case the error message for the very common case where only
6366     // a single subtarget feature is missing (neon, e.g.).
6367     std::string Msg = "instruction requires:";
6368     for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6369       if (MissingFeatures[i]) {
6370         Msg += " ";
6371         Msg += getSubtargetFeatureName(i);
6372       }
6373     }
6374     return Error(IDLoc, Msg);
6375   }
6376   case Match_MnemonicFail:
6377     return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6378   case Match_InvalidOperand: {
6379     SMLoc ErrorLoc = IDLoc;
6380 
6381     if (ErrorInfo != ~0ULL) {
6382       if (ErrorInfo >= Operands.size())
6383         return Error(IDLoc, "too few operands for instruction",
6384                      SMRange(IDLoc, getTok().getLoc()));
6385 
6386       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6387       if (ErrorLoc == SMLoc())
6388         ErrorLoc = IDLoc;
6389     }
6390     // If the match failed on a suffix token operand, tweak the diagnostic
6391     // accordingly.
6392     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6393         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6394       MatchResult = Match_InvalidSuffix;
6395 
6396     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6397   }
6398   case Match_InvalidTiedOperand:
6399   case Match_InvalidMemoryIndexed1:
6400   case Match_InvalidMemoryIndexed2:
6401   case Match_InvalidMemoryIndexed4:
6402   case Match_InvalidMemoryIndexed8:
6403   case Match_InvalidMemoryIndexed16:
6404   case Match_InvalidCondCode:
6405   case Match_AddSubRegExtendSmall:
6406   case Match_AddSubRegExtendLarge:
6407   case Match_AddSubSecondSource:
6408   case Match_LogicalSecondSource:
6409   case Match_AddSubRegShift32:
6410   case Match_AddSubRegShift64:
6411   case Match_InvalidMovImm32Shift:
6412   case Match_InvalidMovImm64Shift:
6413   case Match_InvalidFPImm:
6414   case Match_InvalidMemoryWExtend8:
6415   case Match_InvalidMemoryWExtend16:
6416   case Match_InvalidMemoryWExtend32:
6417   case Match_InvalidMemoryWExtend64:
6418   case Match_InvalidMemoryWExtend128:
6419   case Match_InvalidMemoryXExtend8:
6420   case Match_InvalidMemoryXExtend16:
6421   case Match_InvalidMemoryXExtend32:
6422   case Match_InvalidMemoryXExtend64:
6423   case Match_InvalidMemoryXExtend128:
6424   case Match_InvalidMemoryIndexed1SImm4:
6425   case Match_InvalidMemoryIndexed2SImm4:
6426   case Match_InvalidMemoryIndexed3SImm4:
6427   case Match_InvalidMemoryIndexed4SImm4:
6428   case Match_InvalidMemoryIndexed1SImm6:
6429   case Match_InvalidMemoryIndexed16SImm4:
6430   case Match_InvalidMemoryIndexed32SImm4:
6431   case Match_InvalidMemoryIndexed4SImm7:
6432   case Match_InvalidMemoryIndexed8SImm7:
6433   case Match_InvalidMemoryIndexed16SImm7:
6434   case Match_InvalidMemoryIndexed8UImm5:
6435   case Match_InvalidMemoryIndexed8UImm3:
6436   case Match_InvalidMemoryIndexed4UImm5:
6437   case Match_InvalidMemoryIndexed2UImm5:
6438   case Match_InvalidMemoryIndexed1UImm6:
6439   case Match_InvalidMemoryIndexed2UImm6:
6440   case Match_InvalidMemoryIndexed4UImm6:
6441   case Match_InvalidMemoryIndexed8UImm6:
6442   case Match_InvalidMemoryIndexed16UImm6:
6443   case Match_InvalidMemoryIndexedSImm6:
6444   case Match_InvalidMemoryIndexedSImm5:
6445   case Match_InvalidMemoryIndexedSImm8:
6446   case Match_InvalidMemoryIndexedSImm9:
6447   case Match_InvalidMemoryIndexed16SImm9:
6448   case Match_InvalidMemoryIndexed8SImm10:
6449   case Match_InvalidImm0_0:
6450   case Match_InvalidImm0_1:
6451   case Match_InvalidImm0_3:
6452   case Match_InvalidImm0_7:
6453   case Match_InvalidImm0_15:
6454   case Match_InvalidImm0_31:
6455   case Match_InvalidImm0_63:
6456   case Match_InvalidImm0_127:
6457   case Match_InvalidImm0_255:
6458   case Match_InvalidImm0_65535:
6459   case Match_InvalidImm1_8:
6460   case Match_InvalidImm1_16:
6461   case Match_InvalidImm1_32:
6462   case Match_InvalidImm1_64:
6463   case Match_InvalidMemoryIndexedRange2UImm0:
6464   case Match_InvalidMemoryIndexedRange2UImm1:
6465   case Match_InvalidMemoryIndexedRange2UImm2:
6466   case Match_InvalidMemoryIndexedRange2UImm3:
6467   case Match_InvalidMemoryIndexedRange4UImm0:
6468   case Match_InvalidMemoryIndexedRange4UImm1:
6469   case Match_InvalidMemoryIndexedRange4UImm2:
6470   case Match_InvalidSVEAddSubImm8:
6471   case Match_InvalidSVEAddSubImm16:
6472   case Match_InvalidSVEAddSubImm32:
6473   case Match_InvalidSVEAddSubImm64:
6474   case Match_InvalidSVECpyImm8:
6475   case Match_InvalidSVECpyImm16:
6476   case Match_InvalidSVECpyImm32:
6477   case Match_InvalidSVECpyImm64:
6478   case Match_InvalidIndexRange0_0:
6479   case Match_InvalidIndexRange1_1:
6480   case Match_InvalidIndexRange0_15:
6481   case Match_InvalidIndexRange0_7:
6482   case Match_InvalidIndexRange0_3:
6483   case Match_InvalidIndexRange0_1:
6484   case Match_InvalidSVEIndexRange0_63:
6485   case Match_InvalidSVEIndexRange0_31:
6486   case Match_InvalidSVEIndexRange0_15:
6487   case Match_InvalidSVEIndexRange0_7:
6488   case Match_InvalidSVEIndexRange0_3:
6489   case Match_InvalidLabel:
6490   case Match_InvalidComplexRotationEven:
6491   case Match_InvalidComplexRotationOdd:
6492   case Match_InvalidGPR64shifted8:
6493   case Match_InvalidGPR64shifted16:
6494   case Match_InvalidGPR64shifted32:
6495   case Match_InvalidGPR64shifted64:
6496   case Match_InvalidGPR64shifted128:
6497   case Match_InvalidGPR64NoXZRshifted8:
6498   case Match_InvalidGPR64NoXZRshifted16:
6499   case Match_InvalidGPR64NoXZRshifted32:
6500   case Match_InvalidGPR64NoXZRshifted64:
6501   case Match_InvalidGPR64NoXZRshifted128:
6502   case Match_InvalidZPR32UXTW8:
6503   case Match_InvalidZPR32UXTW16:
6504   case Match_InvalidZPR32UXTW32:
6505   case Match_InvalidZPR32UXTW64:
6506   case Match_InvalidZPR32SXTW8:
6507   case Match_InvalidZPR32SXTW16:
6508   case Match_InvalidZPR32SXTW32:
6509   case Match_InvalidZPR32SXTW64:
6510   case Match_InvalidZPR64UXTW8:
6511   case Match_InvalidZPR64SXTW8:
6512   case Match_InvalidZPR64UXTW16:
6513   case Match_InvalidZPR64SXTW16:
6514   case Match_InvalidZPR64UXTW32:
6515   case Match_InvalidZPR64SXTW32:
6516   case Match_InvalidZPR64UXTW64:
6517   case Match_InvalidZPR64SXTW64:
6518   case Match_InvalidZPR32LSL8:
6519   case Match_InvalidZPR32LSL16:
6520   case Match_InvalidZPR32LSL32:
6521   case Match_InvalidZPR32LSL64:
6522   case Match_InvalidZPR64LSL8:
6523   case Match_InvalidZPR64LSL16:
6524   case Match_InvalidZPR64LSL32:
6525   case Match_InvalidZPR64LSL64:
6526   case Match_InvalidZPR0:
6527   case Match_InvalidZPR8:
6528   case Match_InvalidZPR16:
6529   case Match_InvalidZPR32:
6530   case Match_InvalidZPR64:
6531   case Match_InvalidZPR128:
6532   case Match_InvalidZPR_3b8:
6533   case Match_InvalidZPR_3b16:
6534   case Match_InvalidZPR_3b32:
6535   case Match_InvalidZPR_4b8:
6536   case Match_InvalidZPR_4b16:
6537   case Match_InvalidZPR_4b32:
6538   case Match_InvalidZPR_4b64:
6539   case Match_InvalidSVEPredicateAnyReg:
6540   case Match_InvalidSVEPattern:
6541   case Match_InvalidSVEVecLenSpecifier:
6542   case Match_InvalidSVEPredicateBReg:
6543   case Match_InvalidSVEPredicateHReg:
6544   case Match_InvalidSVEPredicateSReg:
6545   case Match_InvalidSVEPredicateDReg:
6546   case Match_InvalidSVEPredicate3bAnyReg:
6547   case Match_InvalidSVEPNPredicateB_p8to15Reg:
6548   case Match_InvalidSVEPNPredicateH_p8to15Reg:
6549   case Match_InvalidSVEPNPredicateS_p8to15Reg:
6550   case Match_InvalidSVEPNPredicateD_p8to15Reg:
6551   case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6552   case Match_InvalidSVEPNPredicateBReg:
6553   case Match_InvalidSVEPNPredicateHReg:
6554   case Match_InvalidSVEPNPredicateSReg:
6555   case Match_InvalidSVEPNPredicateDReg:
6556   case Match_InvalidSVEPredicateListMul2x8:
6557   case Match_InvalidSVEPredicateListMul2x16:
6558   case Match_InvalidSVEPredicateListMul2x32:
6559   case Match_InvalidSVEPredicateListMul2x64:
6560   case Match_InvalidSVEExactFPImmOperandHalfOne:
6561   case Match_InvalidSVEExactFPImmOperandHalfTwo:
6562   case Match_InvalidSVEExactFPImmOperandZeroOne:
6563   case Match_InvalidMatrixTile32:
6564   case Match_InvalidMatrixTile64:
6565   case Match_InvalidMatrix:
6566   case Match_InvalidMatrix8:
6567   case Match_InvalidMatrix16:
6568   case Match_InvalidMatrix32:
6569   case Match_InvalidMatrix64:
6570   case Match_InvalidMatrixTileVectorH8:
6571   case Match_InvalidMatrixTileVectorH16:
6572   case Match_InvalidMatrixTileVectorH32:
6573   case Match_InvalidMatrixTileVectorH64:
6574   case Match_InvalidMatrixTileVectorH128:
6575   case Match_InvalidMatrixTileVectorV8:
6576   case Match_InvalidMatrixTileVectorV16:
6577   case Match_InvalidMatrixTileVectorV32:
6578   case Match_InvalidMatrixTileVectorV64:
6579   case Match_InvalidMatrixTileVectorV128:
6580   case Match_InvalidSVCR:
6581   case Match_InvalidMatrixIndexGPR32_12_15:
6582   case Match_InvalidMatrixIndexGPR32_8_11:
6583   case Match_InvalidLookupTable:
6584   case Match_InvalidSVEVectorListMul2x8:
6585   case Match_InvalidSVEVectorListMul2x16:
6586   case Match_InvalidSVEVectorListMul2x32:
6587   case Match_InvalidSVEVectorListMul2x64:
6588   case Match_InvalidSVEVectorListMul4x8:
6589   case Match_InvalidSVEVectorListMul4x16:
6590   case Match_InvalidSVEVectorListMul4x32:
6591   case Match_InvalidSVEVectorListMul4x64:
6592   case Match_InvalidSVEVectorListStrided2x8:
6593   case Match_InvalidSVEVectorListStrided2x16:
6594   case Match_InvalidSVEVectorListStrided2x32:
6595   case Match_InvalidSVEVectorListStrided2x64:
6596   case Match_InvalidSVEVectorListStrided4x8:
6597   case Match_InvalidSVEVectorListStrided4x16:
6598   case Match_InvalidSVEVectorListStrided4x32:
6599   case Match_InvalidSVEVectorListStrided4x64:
6600   case Match_MSR:
6601   case Match_MRS: {
6602     if (ErrorInfo >= Operands.size())
6603       return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6604     // Any time we get here, there's nothing fancy to do. Just get the
6605     // operand SMLoc and display the diagnostic.
6606     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6607     if (ErrorLoc == SMLoc())
6608       ErrorLoc = IDLoc;
6609     return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6610   }
6611   }
6612 
6613   llvm_unreachable("Implement any new match types added!");
6614 }
6615 
/// ParseDirective parses the AArch64-specific assembler directives.
/// Returns false once the directive has been dispatched to one of the
/// handlers below (handlers emit their own diagnostics), and true when the
/// directive is not recognised here so the generic parser can try its own
/// handlers.
/// NOTE(review): the handlers' bool results are discarded here — presumably
/// because each handler reports its errors itself; confirm against the
/// generic ParseDirective contract.
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive matching is case-insensitive.
  auto IDVal = DirectiveID.getIdentifier().lower();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".cfi_mte_tagged_frame")
    parseDirectiveCFIMTETaggedFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  else if (IDVal == ".variant_pcs")
    parseDirectiveVariantPCS(Loc);
  // Beyond the common set above, Mach-O only adds the linker optimization
  // hint (LOH) directive.
  else if (IsMachO) {
    if (IDVal == MCLOHDirectiveName())
      parseDirectiveLOH(IDVal, Loc);
    else
      return true;
  // COFF adds the Windows SEH unwind directives.
  } else if (IsCOFF) {
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(Loc);
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(Loc);
    else if (IDVal == ".seh_save_r19r20_x")
      parseDirectiveSEHSaveR19R20X(Loc);
    else if (IDVal == ".seh_save_fplr")
      parseDirectiveSEHSaveFPLR(Loc);
    else if (IDVal == ".seh_save_fplr_x")
      parseDirectiveSEHSaveFPLRX(Loc);
    else if (IDVal == ".seh_save_reg")
      parseDirectiveSEHSaveReg(Loc);
    else if (IDVal == ".seh_save_reg_x")
      parseDirectiveSEHSaveRegX(Loc);
    else if (IDVal == ".seh_save_regp")
      parseDirectiveSEHSaveRegP(Loc);
    else if (IDVal == ".seh_save_regp_x")
      parseDirectiveSEHSaveRegPX(Loc);
    else if (IDVal == ".seh_save_lrpair")
      parseDirectiveSEHSaveLRPair(Loc);
    else if (IDVal == ".seh_save_freg")
      parseDirectiveSEHSaveFReg(Loc);
    else if (IDVal == ".seh_save_freg_x")
      parseDirectiveSEHSaveFRegX(Loc);
    else if (IDVal == ".seh_save_fregp")
      parseDirectiveSEHSaveFRegP(Loc);
    else if (IDVal == ".seh_save_fregp_x")
      parseDirectiveSEHSaveFRegPX(Loc);
    else if (IDVal == ".seh_set_fp")
      parseDirectiveSEHSetFP(Loc);
    else if (IDVal == ".seh_add_fp")
      parseDirectiveSEHAddFP(Loc);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(Loc);
    else if (IDVal == ".seh_save_next")
      parseDirectiveSEHSaveNext(Loc);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(Loc);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(Loc);
    else if (IDVal == ".seh_trap_frame")
      parseDirectiveSEHTrapFrame(Loc);
    else if (IDVal == ".seh_pushframe")
      parseDirectiveSEHMachineFrame(Loc);
    else if (IDVal == ".seh_context")
      parseDirectiveSEHContext(Loc);
    else if (IDVal == ".seh_clear_unwound_to_call")
      parseDirectiveSEHClearUnwoundToCall(Loc);
    else if (IDVal == ".seh_pac_sign_lr")
      parseDirectiveSEHPACSignLR(Loc);
    // The save_any_reg family encodes (paired, writeback) in the mnemonic
    // suffix: "" / "_p" / "_x" / "_px".
    else if (IDVal == ".seh_save_any_reg")
      parseDirectiveSEHSaveAnyReg(Loc, false, false);
    else if (IDVal == ".seh_save_any_reg_p")
      parseDirectiveSEHSaveAnyReg(Loc, true, false);
    else if (IDVal == ".seh_save_any_reg_x")
      parseDirectiveSEHSaveAnyReg(Loc, false, true);
    else if (IDVal == ".seh_save_any_reg_px")
      parseDirectiveSEHSaveAnyReg(Loc, true, true);
    else
      return true;
  } else
    return true;
  return false;
}
6716 
6717 static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
6718                             SmallVector<StringRef, 4> &RequestedExtensions) {
6719   const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
6720   const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
6721 
6722   if (!NoCrypto && Crypto) {
6723     // Map 'generic' (and others) to sha2 and aes, because
6724     // that was the traditional meaning of crypto.
6725     if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6726         ArchInfo == AArch64::ARMV8_3A) {
6727       RequestedExtensions.push_back("sha2");
6728       RequestedExtensions.push_back("aes");
6729     }
6730     if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6731         ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6732         ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6733         ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6734         ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6735         ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
6736       RequestedExtensions.push_back("sm4");
6737       RequestedExtensions.push_back("sha3");
6738       RequestedExtensions.push_back("sha2");
6739       RequestedExtensions.push_back("aes");
6740     }
6741   } else if (NoCrypto) {
6742     // Map 'generic' (and others) to sha2 and aes, because
6743     // that was the traditional meaning of crypto.
6744     if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
6745         ArchInfo == AArch64::ARMV8_3A) {
6746       RequestedExtensions.push_back("nosha2");
6747       RequestedExtensions.push_back("noaes");
6748     }
6749     if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
6750         ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
6751         ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
6752         ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
6753         ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
6754         ArchInfo == AArch64::ARMV9_4A) {
6755       RequestedExtensions.push_back("nosm4");
6756       RequestedExtensions.push_back("nosha3");
6757       RequestedExtensions.push_back("nosha2");
6758       RequestedExtensions.push_back("noaes");
6759     }
6760   }
6761 }
6762 
/// parseDirectiveArch
///   ::= .arch token
/// Resets the subtarget to the named base architecture ("generic" CPU with
/// that architecture's default extensions), then applies any "+ext"/"+noext"
/// modifiers that followed the architecture name.
bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
  SMLoc ArchLoc = getLoc();

  // The operand has the form "<arch>(+<ext>)*"; split off the extensions.
  StringRef Arch, ExtensionString;
  std::tie(Arch, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');

  std::optional<AArch64::ArchInfo> ArchInfo = AArch64::parseArch(Arch);
  if (!ArchInfo)
    return Error(ArchLoc, "unknown arch name");

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  // Get the architecture and extension features.
  std::vector<StringRef> AArch64Features;
  AArch64Features.push_back(ArchInfo->ArchFeature);
  AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);

  // Replace the current subtarget with a fresh "generic" one carrying the
  // requested architecture's feature string.
  MCSubtargetInfo &STI = copySTI();
  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
  STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
                         join(ArchFeatures.begin(), ArchFeatures.end(), ","));

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  // Legacy "+crypto"/"+nocrypto" expand to the architecture-appropriate set
  // of individual crypto extensions.
  ExpandCryptoAEK(*ArchInfo, RequestedExtensions);

  // NOTE(review): `Features` is snapshotted once here, so when several
  // extensions are toggled below the masks are computed against a stale
  // bitset (parseDirectiveCPU refetches getFeatureBits() per extension) —
  // confirm whether that difference is intended.
  FeatureBitset Features = STI.getFeatureBits();
  setAvailableFeatures(ComputeAvailableFeatures(Features));
  for (auto Name : RequestedExtensions) {
    bool EnableFeature = true;

    // A leading "no" (case-insensitive) disables the extension.
    if (Name.starts_with_insensitive("no")) {
      EnableFeature = false;
      Name = Name.substr(2);
    }

    // NOTE(review): extension names with no entry in ExtensionMap are
    // silently ignored here, while .cpu diagnoses them — confirm intended.
    for (const auto &Extension : ExtensionMap) {
      if (Extension.Name != Name)
        continue;

      if (Extension.Features.none())
        report_fatal_error("unsupported architectural extension: " + Name);

      // Enabling sets the not-yet-set bits (and transitive implications);
      // disabling toggles off the currently-set bits of this extension.
      FeatureBitset ToggleFeatures =
          EnableFeature
              ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
              : STI.ToggleFeature(Features & Extension.Features);
      setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
      break;
    }
  }
  return false;
}
6822 
6823 /// parseDirectiveArchExtension
6824 ///   ::= .arch_extension [no]feature
6825 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
6826   SMLoc ExtLoc = getLoc();
6827 
6828   StringRef Name = getParser().parseStringToEndOfStatement().trim();
6829 
6830   if (parseEOL())
6831     return true;
6832 
6833   bool EnableFeature = true;
6834   if (Name.starts_with_insensitive("no")) {
6835     EnableFeature = false;
6836     Name = Name.substr(2);
6837   }
6838 
6839   MCSubtargetInfo &STI = copySTI();
6840   FeatureBitset Features = STI.getFeatureBits();
6841   for (const auto &Extension : ExtensionMap) {
6842     if (Extension.Name != Name)
6843       continue;
6844 
6845     if (Extension.Features.none())
6846       return Error(ExtLoc, "unsupported architectural extension: " + Name);
6847 
6848     FeatureBitset ToggleFeatures =
6849         EnableFeature
6850             ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
6851             : STI.ToggleFeature(Features & Extension.Features);
6852     setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
6853     return false;
6854   }
6855 
6856   return Error(ExtLoc, "unknown architectural extension: " + Name);
6857 }
6858 
6859 static SMLoc incrementLoc(SMLoc L, int Offset) {
6860   return SMLoc::getFromPointer(L.getPointer() + Offset);
6861 }
6862 
/// parseDirectiveCPU
///   ::= .cpu id
/// Resets the subtarget to the named CPU's default features, then applies
/// any "+ext"/"+noext" modifiers that followed the CPU name.
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();

  // The operand has the form "<cpu>(+<ext>)*"; split off the extensions.
  StringRef CPU, ExtensionString;
  std::tie(CPU, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');

  if (parseToken(AsmToken::EndOfStatement))
    return true;

  SmallVector<StringRef, 4> RequestedExtensions;
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');

  const std::optional<llvm::AArch64::ArchInfo> CpuArch = llvm::AArch64::getArchForCpu(CPU);
  if (!CpuArch) {
    // Report the unknown CPU, but return false ("directive handled") so
    // assembly continues past the bad directive.
    Error(CurLoc, "unknown CPU name");
    return false;
  }
  // Legacy "+crypto"/"+nocrypto" expand to the architecture-appropriate set
  // of individual crypto extensions.
  ExpandCryptoAEK(*CpuArch, RequestedExtensions);

  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
  // CurLoc tracks the position of each "+ext" token so the diagnostics
  // below can point at the offending extension name.
  CurLoc = incrementLoc(CurLoc, CPU.size());

  for (auto Name : RequestedExtensions) {
    // Advance source location past '+'.
    CurLoc = incrementLoc(CurLoc, 1);

    bool EnableFeature = true;

    // A leading "no" (case-insensitive) disables the extension.
    if (Name.starts_with_insensitive("no")) {
      EnableFeature = false;
      Name = Name.substr(2);
    }

    bool FoundExtension = false;
    for (const auto &Extension : ExtensionMap) {
      if (Extension.Name != Name)
        continue;

      // NOTE(review): an empty feature set is fatal here but only a
      // recoverable Error in parseDirectiveArchExtension — confirm the
      // asymmetry is intended.
      if (Extension.Features.none())
        report_fatal_error("unsupported architectural extension: " + Name);

      // Enabling sets the not-yet-set bits (plus transitive implications);
      // disabling toggles off the currently-set bits of this extension.
      // Feature bits are refetched per extension so successive modifiers
      // see each other's effects.
      FeatureBitset Features = STI.getFeatureBits();
      FeatureBitset ToggleFeatures =
          EnableFeature
              ? STI.SetFeatureBitsTransitively(~Features & Extension.Features)
              : STI.ToggleFeature(Features & Extension.Features);
      setAvailableFeatures(ComputeAvailableFeatures(ToggleFeatures));
      FoundExtension = true;

      break;
    }

    if (!FoundExtension)
      Error(CurLoc, "unsupported architectural extension");

    // NOTE(review): Name was shortened by 2 when a "no" prefix was
    // stripped, so this advance looks short for "no..." extensions —
    // verify subsequent diagnostics still line up.
    CurLoc = incrementLoc(CurLoc, Name.size());
  }
  return false;
}
6927 
6928 /// parseDirectiveInst
6929 ///  ::= .inst opcode [, ...]
6930 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
6931   if (getLexer().is(AsmToken::EndOfStatement))
6932     return Error(Loc, "expected expression following '.inst' directive");
6933 
6934   auto parseOp = [&]() -> bool {
6935     SMLoc L = getLoc();
6936     const MCExpr *Expr = nullptr;
6937     if (check(getParser().parseExpression(Expr), L, "expected expression"))
6938       return true;
6939     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6940     if (check(!Value, L, "expected constant expression"))
6941       return true;
6942     getTargetStreamer().emitInst(Value->getValue());
6943     return false;
6944   };
6945 
6946   return parseMany(parseOp);
6947 }
6948 
6949 // parseDirectiveTLSDescCall:
6950 //   ::= .tlsdesccall symbol
6951 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
6952   StringRef Name;
6953   if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
6954       parseToken(AsmToken::EndOfStatement))
6955     return true;
6956 
6957   MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
6958   const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
6959   Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
6960 
6961   MCInst Inst;
6962   Inst.setOpcode(AArch64::TLSDESCCALL);
6963   Inst.addOperand(MCOperand::createExpr(Expr));
6964 
6965   getParser().getStreamer().emitInstruction(Inst, getSTI());
6966   return false;
6967 }
6968 
6969 /// ::= .loh <lohName | lohId> label1, ..., labelN
6970 /// The number of arguments depends on the loh identifier.
6971 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
6972   MCLOHType Kind;
6973   if (getTok().isNot(AsmToken::Identifier)) {
6974     if (getTok().isNot(AsmToken::Integer))
6975       return TokError("expected an identifier or a number in directive");
6976     // We successfully get a numeric value for the identifier.
6977     // Check if it is valid.
6978     int64_t Id = getTok().getIntVal();
6979     if (Id <= -1U && !isValidMCLOHType(Id))
6980       return TokError("invalid numeric identifier in directive");
6981     Kind = (MCLOHType)Id;
6982   } else {
6983     StringRef Name = getTok().getIdentifier();
6984     // We successfully parse an identifier.
6985     // Check if it is a recognized one.
6986     int Id = MCLOHNameToId(Name);
6987 
6988     if (Id == -1)
6989       return TokError("invalid identifier in directive");
6990     Kind = (MCLOHType)Id;
6991   }
6992   // Consume the identifier.
6993   Lex();
6994   // Get the number of arguments of this LOH.
6995   int NbArgs = MCLOHIdToNbArgs(Kind);
6996 
6997   assert(NbArgs != -1 && "Invalid number of arguments");
6998 
6999   SmallVector<MCSymbol *, 3> Args;
7000   for (int Idx = 0; Idx < NbArgs; ++Idx) {
7001     StringRef Name;
7002     if (getParser().parseIdentifier(Name))
7003       return TokError("expected identifier in directive");
7004     Args.push_back(getContext().getOrCreateSymbol(Name));
7005 
7006     if (Idx + 1 == NbArgs)
7007       break;
7008     if (parseComma())
7009       return true;
7010   }
7011   if (parseEOL())
7012     return true;
7013 
7014   getStreamer().emitLOHDirective((MCLOHType)Kind, Args);
7015   return false;
7016 }
7017 
7018 /// parseDirectiveLtorg
7019 ///  ::= .ltorg | .pool
7020 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7021   if (parseEOL())
7022     return true;
7023   getTargetStreamer().emitCurrentConstantPool();
7024   return false;
7025 }
7026 
7027 /// parseDirectiveReq
7028 ///  ::= name .req registername
7029 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7030   Lex(); // Eat the '.req' token.
7031   SMLoc SRegLoc = getLoc();
7032   RegKind RegisterKind = RegKind::Scalar;
7033   MCRegister RegNum;
7034   ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7035 
7036   if (!ParseRes.isSuccess()) {
7037     StringRef Kind;
7038     RegisterKind = RegKind::NeonVector;
7039     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7040 
7041     if (ParseRes.isFailure())
7042       return true;
7043 
7044     if (ParseRes.isSuccess() && !Kind.empty())
7045       return Error(SRegLoc, "vector register without type specifier expected");
7046   }
7047 
7048   if (!ParseRes.isSuccess()) {
7049     StringRef Kind;
7050     RegisterKind = RegKind::SVEDataVector;
7051     ParseRes =
7052         tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7053 
7054     if (ParseRes.isFailure())
7055       return true;
7056 
7057     if (ParseRes.isSuccess() && !Kind.empty())
7058       return Error(SRegLoc,
7059                    "sve vector register without type specifier expected");
7060   }
7061 
7062   if (!ParseRes.isSuccess()) {
7063     StringRef Kind;
7064     RegisterKind = RegKind::SVEPredicateVector;
7065     ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7066 
7067     if (ParseRes.isFailure())
7068       return true;
7069 
7070     if (ParseRes.isSuccess() && !Kind.empty())
7071       return Error(SRegLoc,
7072                    "sve predicate register without type specifier expected");
7073   }
7074 
7075   if (!ParseRes.isSuccess())
7076     return Error(SRegLoc, "register name or alias expected");
7077 
7078   // Shouldn't be anything else.
7079   if (parseEOL())
7080     return true;
7081 
7082   auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7083   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7084     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7085 
7086   return false;
7087 }
7088 
7089 /// parseDirectiveUneq
7090 ///  ::= .unreq registername
7091 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7092   if (getTok().isNot(AsmToken::Identifier))
7093     return TokError("unexpected input in .unreq directive.");
7094   RegisterReqs.erase(getTok().getIdentifier().lower());
7095   Lex(); // Eat the identifier.
7096   return parseToken(AsmToken::EndOfStatement);
7097 }
7098 
7099 bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7100   if (parseEOL())
7101     return true;
7102   getStreamer().emitCFINegateRAState();
7103   return false;
7104 }
7105 
7106 /// parseDirectiveCFIBKeyFrame
7107 /// ::= .cfi_b_key
7108 bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7109   if (parseEOL())
7110     return true;
7111   getStreamer().emitCFIBKeyFrame();
7112   return false;
7113 }
7114 
7115 /// parseDirectiveCFIMTETaggedFrame
7116 /// ::= .cfi_mte_tagged_frame
7117 bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7118   if (parseEOL())
7119     return true;
7120   getStreamer().emitCFIMTETaggedFrame();
7121   return false;
7122 }
7123 
7124 /// parseDirectiveVariantPCS
7125 /// ::= .variant_pcs symbolname
7126 bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7127   StringRef Name;
7128   if (getParser().parseIdentifier(Name))
7129     return TokError("expected symbol name");
7130   if (parseEOL())
7131     return true;
7132   getTargetStreamer().emitDirectiveVariantPCS(
7133       getContext().getOrCreateSymbol(Name));
7134   return false;
7135 }
7136 
7137 /// parseDirectiveSEHAllocStack
7138 /// ::= .seh_stackalloc
7139 bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7140   int64_t Size;
7141   if (parseImmExpr(Size))
7142     return true;
7143   getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7144   return false;
7145 }
7146 
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPrologEnd();
  return false;
}
7153 
7154 /// parseDirectiveSEHSaveR19R20X
7155 /// ::= .seh_save_r19r20_x
7156 bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7157   int64_t Offset;
7158   if (parseImmExpr(Offset))
7159     return true;
7160   getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7161   return false;
7162 }
7163 
7164 /// parseDirectiveSEHSaveFPLR
7165 /// ::= .seh_save_fplr
7166 bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7167   int64_t Offset;
7168   if (parseImmExpr(Offset))
7169     return true;
7170   getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7171   return false;
7172 }
7173 
7174 /// parseDirectiveSEHSaveFPLRX
7175 /// ::= .seh_save_fplr_x
7176 bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7177   int64_t Offset;
7178   if (parseImmExpr(Offset))
7179     return true;
7180   getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7181   return false;
7182 }
7183 
7184 /// parseDirectiveSEHSaveReg
7185 /// ::= .seh_save_reg
7186 bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7187   unsigned Reg;
7188   int64_t Offset;
7189   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7190       parseComma() || parseImmExpr(Offset))
7191     return true;
7192   getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7193   return false;
7194 }
7195 
7196 /// parseDirectiveSEHSaveRegX
7197 /// ::= .seh_save_reg_x
7198 bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7199   unsigned Reg;
7200   int64_t Offset;
7201   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7202       parseComma() || parseImmExpr(Offset))
7203     return true;
7204   getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7205   return false;
7206 }
7207 
7208 /// parseDirectiveSEHSaveRegP
7209 /// ::= .seh_save_regp
7210 bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7211   unsigned Reg;
7212   int64_t Offset;
7213   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7214       parseComma() || parseImmExpr(Offset))
7215     return true;
7216   getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7217   return false;
7218 }
7219 
7220 /// parseDirectiveSEHSaveRegPX
7221 /// ::= .seh_save_regp_x
7222 bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7223   unsigned Reg;
7224   int64_t Offset;
7225   if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7226       parseComma() || parseImmExpr(Offset))
7227     return true;
7228   getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7229   return false;
7230 }
7231 
/// parseDirectiveSEHSaveLRPair
/// ::= .seh_save_lrpair reg, offset
///
/// Emits the Windows CFI opcode for saving lr paired with another register.
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  // Re-anchor diagnostics at the operand list rather than the directive name.
  L = getLoc();
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  // NOTE(review): the '(Reg - 19) % 2' test assumes parseRegisterInRange
  // returns the register *number* (19 for x19) rather than the enum value —
  // confirm against parseRegisterInRange's definition.
  if (check(((Reg - 19) % 2 != 0), L,
            "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}
7247 
7248 /// parseDirectiveSEHSaveFReg
7249 /// ::= .seh_save_freg
7250 bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7251   unsigned Reg;
7252   int64_t Offset;
7253   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7254       parseComma() || parseImmExpr(Offset))
7255     return true;
7256   getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7257   return false;
7258 }
7259 
7260 /// parseDirectiveSEHSaveFRegX
7261 /// ::= .seh_save_freg_x
7262 bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7263   unsigned Reg;
7264   int64_t Offset;
7265   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7266       parseComma() || parseImmExpr(Offset))
7267     return true;
7268   getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7269   return false;
7270 }
7271 
7272 /// parseDirectiveSEHSaveFRegP
7273 /// ::= .seh_save_fregp
7274 bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7275   unsigned Reg;
7276   int64_t Offset;
7277   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7278       parseComma() || parseImmExpr(Offset))
7279     return true;
7280   getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7281   return false;
7282 }
7283 
7284 /// parseDirectiveSEHSaveFRegPX
7285 /// ::= .seh_save_fregp_x
7286 bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7287   unsigned Reg;
7288   int64_t Offset;
7289   if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7290       parseComma() || parseImmExpr(Offset))
7291     return true;
7292   getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7293   return false;
7294 }
7295 
/// parseDirectiveSEHSetFP
/// ::= .seh_set_fp
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}
7302 
7303 /// parseDirectiveSEHAddFP
7304 /// ::= .seh_add_fp
7305 bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7306   int64_t Size;
7307   if (parseImmExpr(Size))
7308     return true;
7309   getTargetStreamer().emitARM64WinCFIAddFP(Size);
7310   return false;
7311 }
7312 
/// parseDirectiveSEHNop
/// ::= .seh_nop
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}
7319 
/// parseDirectiveSEHSaveNext
/// ::= .seh_save_next
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}
7326 
/// parseDirectiveSEHEpilogStart
/// ::= .seh_startepilogue
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}
7333 
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}
7340 
/// parseDirectiveSEHTrapFrame
/// ::= .seh_trap_frame
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}
7347 
/// parseDirectiveSEHMachineFrame
/// ::= .seh_pushframe
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}
7354 
/// parseDirectiveSEHContext
/// ::= .seh_context
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}
7361 
/// parseDirectiveSEHClearUnwoundToCall
/// ::= .seh_clear_unwound_to_call
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}
7368 
/// parseDirectiveSEHPACSignLR
/// ::= .seh_pac_sign_lr
///
/// Takes no operands; trailing tokens on the line are not checked here.
bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}
7375 
/// parseDirectiveSEHSaveAnyReg
/// ::= .seh_save_any_reg
/// ::= .seh_save_any_reg_p
/// ::= .seh_save_any_reg_x
/// ::= .seh_save_any_reg_px
///
/// Parses "reg, offset" and emits one of the twelve save_any_reg unwind-code
/// variants, selected by the register class (x/d/q) and the Paired/Writeback
/// flags passed in from the directive name.
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  // Integer registers: x0-x28 plus fp (encoded 29) and lr (encoded 30).
  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    // Offsets must be non-negative, 16-byte aligned for paired or writeback
    // forms and 8-byte aligned otherwise.
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      // lr is the highest-numbered integer register, so it cannot start a
      // pair.
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    // 64-bit floating-point registers d0-d31.
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      // d31 has no following register to pair with.
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    // 128-bit vector registers q0-q31; always 16-byte aligned offsets.
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      // q31 has no following register to pair with.
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}
7453 
/// Classify an expression operand as a symbol reference.
///
/// Peels an optional AArch64MCExpr wrapper (reporting its variant in
/// \p ELFRefKind), then decides whether the remainder is a plain symbol
/// reference or a symbol plus constant addend. Returns false if the
/// expression cannot be treated as a symbol reference, or if it mixes ELF
/// and Darwin reference syntax.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  // Defaults for the "no classification" case.
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Strip an AArch64-specific wrapper like ':lo12:' and remember its kind.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Check that it looks like a symbol + an addend
  MCValue Res;
  bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr, nullptr);
  // A subtracted symbol (SymB) cannot be expressed as symbol+addend.
  if (!Relocatable || Res.getSymB())
    return false;

  // Treat expressions with an ELFRefKind (like ":abs_g1:3", or
  // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
  if (!Res.getSymA() && ELFRefKind == AArch64MCExpr::VK_INVALID)
    return false;

  if (Res.getSymA())
    DarwinRefKind = Res.getSymA()->getKind();
  Addend = Res.getConstant();

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
7495 
/// Force static initialization.
///
/// Registers this asm parser for every AArch64-family target: little- and
/// big-endian AArch64, Apple-style ARM64, and the 32-bit ILP32 variants.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
  RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
  RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
}
7504 
7505 #define GET_REGISTER_MATCHER
7506 #define GET_SUBTARGET_FEATURE_NAME
7507 #define GET_MATCHER_IMPLEMENTATION
7508 #define GET_MNEMONIC_SPELL_CHECKER
7509 #include "AArch64GenAsmMatcher.inc"
7510 
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
//
/// Validate an operand against a target-specific match class that the
/// generated matcher cannot handle itself: the literal "za" token and
/// fixed-value immediate tokens (e.g. '#16') used by InstAliases.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  // Succeeds only for a constant immediate equal to ExpectedVal.
  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // If the Kind is a token for the MPR register class which has the "za"
    // register (SME accumulator array), check if the asm is a literal "za"
    // token. This is for the "smstart za" alias that defines the register
    // as a literal token.
    if (Op.isTokenEqual("za"))
      return Match_Success;
    return Match_InvalidOperand;

    // If the kind is a token for a literal immediate, check if our asm operand
    // matches. This is for InstAliases which have a fixed-value immediate in
    // the asm string, such as hints which are parsed into a specific
    // instruction definition.
#define MATCH_HASH(N)                                                          \
  case MCK__HASH_##N:                                                          \
    return MatchesOpImmediate(N);
    MATCH_HASH(0)
    MATCH_HASH(1)
    MATCH_HASH(2)
    MATCH_HASH(3)
    MATCH_HASH(4)
    MATCH_HASH(6)
    MATCH_HASH(7)
    MATCH_HASH(8)
    MATCH_HASH(10)
    MATCH_HASH(12)
    MATCH_HASH(14)
    MATCH_HASH(16)
    MATCH_HASH(24)
    MATCH_HASH(25)
    MATCH_HASH(26)
    MATCH_HASH(27)
    MATCH_HASH(28)
    MATCH_HASH(29)
    MATCH_HASH(30)
    MATCH_HASH(31)
    MATCH_HASH(32)
    MATCH_HASH(40)
    MATCH_HASH(48)
    MATCH_HASH(64)
#undef MATCH_HASH
    // Same idea for negative literal immediates such as '#-16'.
#define MATCH_HASH_MINUS(N)                                                    \
  case MCK__HASH__MINUS_##N:                                                   \
    return MatchesOpImmediate(-N);
    MATCH_HASH_MINUS(4)
    MATCH_HASH_MINUS(8)
    MATCH_HASH_MINUS(16)
#undef MATCH_HASH_MINUS
  }
}
7581 
7582 ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
7583 
7584   SMLoc S = getLoc();
7585 
7586   if (getTok().isNot(AsmToken::Identifier))
7587     return Error(S, "expected register");
7588 
7589   MCRegister FirstReg;
7590   ParseStatus Res = tryParseScalarRegister(FirstReg);
7591   if (!Res.isSuccess())
7592     return Error(S, "expected first even register of a consecutive same-size "
7593                     "even/odd register pair");
7594 
7595   const MCRegisterClass &WRegClass =
7596       AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
7597   const MCRegisterClass &XRegClass =
7598       AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
7599 
7600   bool isXReg = XRegClass.contains(FirstReg),
7601        isWReg = WRegClass.contains(FirstReg);
7602   if (!isXReg && !isWReg)
7603     return Error(S, "expected first even register of a consecutive same-size "
7604                     "even/odd register pair");
7605 
7606   const MCRegisterInfo *RI = getContext().getRegisterInfo();
7607   unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
7608 
7609   if (FirstEncoding & 0x1)
7610     return Error(S, "expected first even register of a consecutive same-size "
7611                     "even/odd register pair");
7612 
7613   if (getTok().isNot(AsmToken::Comma))
7614     return Error(getLoc(), "expected comma");
7615   // Eat the comma
7616   Lex();
7617 
7618   SMLoc E = getLoc();
7619   MCRegister SecondReg;
7620   Res = tryParseScalarRegister(SecondReg);
7621   if (!Res.isSuccess())
7622     return Error(E, "expected second odd register of a consecutive same-size "
7623                     "even/odd register pair");
7624 
7625   if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
7626       (isXReg && !XRegClass.contains(SecondReg)) ||
7627       (isWReg && !WRegClass.contains(SecondReg)))
7628     return Error(E, "expected second odd register of a consecutive same-size "
7629                     "even/odd register pair");
7630 
7631   unsigned Pair = 0;
7632   if (isXReg) {
7633     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
7634            &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
7635   } else {
7636     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
7637            &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
7638   }
7639 
7640   Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
7641       getLoc(), getContext()));
7642 
7643   return ParseStatus::Success;
7644 }
7645 
7646 template <bool ParseShiftExtend, bool ParseSuffix>
7647 ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
7648   const SMLoc S = getLoc();
7649   // Check for a SVE vector register specifier first.
7650   MCRegister RegNum;
7651   StringRef Kind;
7652 
7653   ParseStatus Res =
7654       tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7655 
7656   if (!Res.isSuccess())
7657     return Res;
7658 
7659   if (ParseSuffix && Kind.empty())
7660     return ParseStatus::NoMatch;
7661 
7662   const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
7663   if (!KindRes)
7664     return ParseStatus::NoMatch;
7665 
7666   unsigned ElementWidth = KindRes->second;
7667 
7668   // No shift/extend is the default.
7669   if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
7670     Operands.push_back(AArch64Operand::CreateVectorReg(
7671         RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
7672 
7673     ParseStatus Res = tryParseVectorIndex(Operands);
7674     if (Res.isFailure())
7675       return ParseStatus::Failure;
7676     return ParseStatus::Success;
7677   }
7678 
7679   // Eat the comma
7680   Lex();
7681 
7682   // Match the shift
7683   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
7684   Res = tryParseOptionalShiftExtend(ExtOpnd);
7685   if (!Res.isSuccess())
7686     return Res;
7687 
7688   auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
7689   Operands.push_back(AArch64Operand::CreateVectorReg(
7690       RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
7691       getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
7692       Ext->hasShiftExtendAmount()));
7693 
7694   return ParseStatus::Success;
7695 }
7696 
7697 ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
7698   MCAsmParser &Parser = getParser();
7699 
7700   SMLoc SS = getLoc();
7701   const AsmToken &TokE = getTok();
7702   bool IsHash = TokE.is(AsmToken::Hash);
7703 
7704   if (!IsHash && TokE.isNot(AsmToken::Identifier))
7705     return ParseStatus::NoMatch;
7706 
7707   int64_t Pattern;
7708   if (IsHash) {
7709     Lex(); // Eat hash
7710 
7711     // Parse the immediate operand.
7712     const MCExpr *ImmVal;
7713     SS = getLoc();
7714     if (Parser.parseExpression(ImmVal))
7715       return ParseStatus::Failure;
7716 
7717     auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
7718     if (!MCE)
7719       return ParseStatus::Failure;
7720 
7721     Pattern = MCE->getValue();
7722   } else {
7723     // Parse the pattern
7724     auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
7725     if (!Pat)
7726       return ParseStatus::NoMatch;
7727 
7728     Lex();
7729     Pattern = Pat->Encoding;
7730     assert(Pattern >= 0 && Pattern < 32);
7731   }
7732 
7733   Operands.push_back(
7734       AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
7735                                 SS, getLoc(), getContext()));
7736 
7737   return ParseStatus::Success;
7738 }
7739 
7740 ParseStatus
7741 AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
7742   int64_t Pattern;
7743   SMLoc SS = getLoc();
7744   const AsmToken &TokE = getTok();
7745   // Parse the pattern
7746   auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
7747       TokE.getString());
7748   if (!Pat)
7749     return ParseStatus::NoMatch;
7750 
7751   Lex();
7752   Pattern = Pat->Encoding;
7753   assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
7754 
7755   Operands.push_back(
7756       AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
7757                                 SS, getLoc(), getContext()));
7758 
7759   return ParseStatus::Success;
7760 }
7761 
7762 ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
7763   SMLoc SS = getLoc();
7764 
7765   MCRegister XReg;
7766   if (!tryParseScalarRegister(XReg).isSuccess())
7767     return ParseStatus::NoMatch;
7768 
7769   MCContext &ctx = getContext();
7770   const MCRegisterInfo *RI = ctx.getRegisterInfo();
7771   int X8Reg = RI->getMatchingSuperReg(
7772       XReg, AArch64::x8sub_0,
7773       &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
7774   if (!X8Reg)
7775     return Error(SS,
7776                  "expected an even-numbered x-register in the range [x0,x22]");
7777 
7778   Operands.push_back(
7779       AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
7780   return ParseStatus::Success;
7781 }
7782 
7783 ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
7784   SMLoc S = getLoc();
7785 
7786   if (getTok().isNot(AsmToken::Integer))
7787     return ParseStatus::NoMatch;
7788 
7789   if (getLexer().peekTok().isNot(AsmToken::Colon))
7790     return ParseStatus::NoMatch;
7791 
7792   const MCExpr *ImmF;
7793   if (getParser().parseExpression(ImmF))
7794     return ParseStatus::NoMatch;
7795 
7796   if (getTok().isNot(AsmToken::Colon))
7797     return ParseStatus::NoMatch;
7798 
7799   Lex(); // Eat ':'
7800   if (getTok().isNot(AsmToken::Integer))
7801     return ParseStatus::NoMatch;
7802 
7803   SMLoc E = getTok().getLoc();
7804   const MCExpr *ImmL;
7805   if (getParser().parseExpression(ImmL))
7806     return ParseStatus::NoMatch;
7807 
7808   unsigned ImmFVal = dyn_cast<MCConstantExpr>(ImmF)->getValue();
7809   unsigned ImmLVal = dyn_cast<MCConstantExpr>(ImmL)->getValue();
7810 
7811   Operands.push_back(
7812       AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
7813   return ParseStatus::Success;
7814 }
7815