1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
28 #include "llvm/MC/MCRegisterInfo.h"
29 #include "llvm/MC/MCStreamer.h"
30 #include "llvm/MC/MCSubtargetInfo.h"
31 #include "llvm/MC/MCSymbol.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
36
37 #include "keystone/arm64.h"
38
39 #include <cstdio>
40 using namespace llvm;
41
namespace {

class AArch64Operand;

/// AArch64AsmParser - Assembly parser for the AArch64 target.  Operand
/// classification lives in AArch64Operand; the bulk of instruction matching
/// is performed by the tablegen-generated matcher included below.
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.

  // Map of register aliases registers via the .req directive.
  StringMap<std::pair<bool, unsigned> > RegisterReqs;

  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  /// Source location of the token currently being parsed.
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  // Hand-written parsers for constructs the generated matcher cannot handle
  // directly (SYS aliases, condition codes, vector lists, symbolic imms...).
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
  //bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
  //bool Error(SMLoc L, const Twine &Msg) { return true; }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  // Assembler directive handlers (.word, .inst, .tlsdesccall, .loh, .ltorg,
  // .req/.unreq).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveInst(SMLoc L);

  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm, unsigned int &ErrorCode, uint64_t &Address) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced by the generated matcher.
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI) {
    MCAsmParserExtension::Initialize(Parser);
    MCStreamer &S = getParser().getStreamer();
    // Ensure a target streamer exists before getTargetStreamer() is used.
    // NOTE(review): presumably MCTargetStreamer's constructor registers the
    // new object with S (so this is not a leak) — confirm against the MC
    // layer in use.
    if (S.getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(S);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands, unsigned int &ErrorCode) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc, unsigned int &ErrorCode) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  // Decompose Expr into an ELF/Darwin relocation modifier plus a constant
  // addend; returns false when the expression does not have that shape.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
} // end anonymous namespace
147
namespace {

/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  // Discriminator for the anonymous union below; exactly one union member
  // is active for a given operand.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier,
    k_PSBHint,
  } Kind;

  // Source range spanned by this operand.
  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;   // Not owned; points into the source buffer.
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  struct RegOp {
    unsigned RegNum;
    bool isVector;
  };

  struct VectorListOp {
    unsigned RegNum;      // First register of the list.
    unsigned Count;       // Number of consecutive registers.
    unsigned NumElements; // Lanes per register.
    unsigned ElementKind; // Element type character; 0 when implicitly typed.
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    unsigned Val; // Encoded 8-bit representation.
  };

  struct BarrierOp {
    unsigned Val; // Not the enum since not all values have names.
    const char *Data;
    unsigned Length;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint32_t MRSReg;      // MRS encoding, or -1U if not readable.
    uint32_t MSRReg;      // MSR encoding, or -1U if not writable.
    uint32_t PStateField; // PState field encoding, or -1U if none.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    unsigned Val;
    const char *Data;
    unsigned Length;
  };

  struct PSBHintOp {
    unsigned Val;
    const char *Data;
    unsigned Length;
  };

  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  struct ExtendOp {
    unsigned Val;
  };

  // Operand payload; which member is valid is determined by Kind.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
272
public:
  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

  // Copy constructor.  Only the union member selected by o.Kind is valid,
  // so copy exactly that member rather than the whole union.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_PSBHint:
      PSBHint = o.PSBHint;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
325
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Checked accessors: each asserts that the union currently holds the
  // matching operand kind before reading it.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return StringRef(Barrier.Data, Barrier.Length);
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return PSBHint.Val;
  }

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);
  }

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return StringRef(Prefetch.Data, Prefetch.Length);
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;
  }

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;
  }

  bool hasShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.HasExplicitAmount;
  }
440
isImm() const441 bool isImm() const override { return Kind == k_Immediate; }
isMem() const442 bool isMem() const override { return false; }
isSImm9() const443 bool isSImm9() const {
444 if (!isImm())
445 return false;
446 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
447 if (!MCE)
448 return false;
449 int64_t Val = MCE->getValue();
450 return (Val >= -256 && Val < 256);
451 }
isSImm7s4() const452 bool isSImm7s4() const {
453 if (!isImm())
454 return false;
455 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
456 if (!MCE)
457 return false;
458 int64_t Val = MCE->getValue();
459 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
460 }
isSImm7s8() const461 bool isSImm7s8() const {
462 if (!isImm())
463 return false;
464 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
465 if (!MCE)
466 return false;
467 int64_t Val = MCE->getValue();
468 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
469 }
isSImm7s16() const470 bool isSImm7s16() const {
471 if (!isImm())
472 return false;
473 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
474 if (!MCE)
475 return false;
476 int64_t Val = MCE->getValue();
477 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
478 }
479
  /// Check whether a symbolic expression can be used as an unsigned 12-bit
  /// scaled load/store offset.  \p Scale is the access size in bytes.
  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                             Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    // Low-12-bits relocation modifiers (ELF :lo12: family and Darwin
    // @pageoff) are valid here.
    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }

  /// Unsigned 12-bit offset, scaled by the access size: a constant multiple
  /// of Scale with quotient in [0, 0xfff].  Non-constant expressions defer
  /// to isSymbolicUImm12Offset above.
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm(), Scale);

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
524
isImm0_1() const525 bool isImm0_1() const {
526 if (!isImm())
527 return false;
528 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
529 if (!MCE)
530 return false;
531 int64_t Val = MCE->getValue();
532 return (Val >= 0 && Val < 2);
533 }
isImm0_7() const534 bool isImm0_7() const {
535 if (!isImm())
536 return false;
537 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
538 if (!MCE)
539 return false;
540 int64_t Val = MCE->getValue();
541 return (Val >= 0 && Val < 8);
542 }
isImm1_8() const543 bool isImm1_8() const {
544 if (!isImm())
545 return false;
546 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
547 if (!MCE)
548 return false;
549 int64_t Val = MCE->getValue();
550 return (Val > 0 && Val < 9);
551 }
isImm0_15() const552 bool isImm0_15() const {
553 if (!isImm())
554 return false;
555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556 if (!MCE)
557 return false;
558 int64_t Val = MCE->getValue();
559 return (Val >= 0 && Val < 16);
560 }
isImm1_16() const561 bool isImm1_16() const {
562 if (!isImm())
563 return false;
564 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
565 if (!MCE)
566 return false;
567 int64_t Val = MCE->getValue();
568 return (Val > 0 && Val < 17);
569 }
isImm0_31() const570 bool isImm0_31() const {
571 if (!isImm())
572 return false;
573 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574 if (!MCE)
575 return false;
576 int64_t Val = MCE->getValue();
577 return (Val >= 0 && Val < 32);
578 }
isImm1_31() const579 bool isImm1_31() const {
580 if (!isImm())
581 return false;
582 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
583 if (!MCE)
584 return false;
585 int64_t Val = MCE->getValue();
586 return (Val >= 1 && Val < 32);
587 }
isImm1_32() const588 bool isImm1_32() const {
589 if (!isImm())
590 return false;
591 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
592 if (!MCE)
593 return false;
594 int64_t Val = MCE->getValue();
595 return (Val >= 1 && Val < 33);
596 }
isImm0_63() const597 bool isImm0_63() const {
598 if (!isImm())
599 return false;
600 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
601 if (!MCE)
602 return false;
603 int64_t Val = MCE->getValue();
604 return (Val >= 0 && Val < 64);
605 }
isImm1_63() const606 bool isImm1_63() const {
607 if (!isImm())
608 return false;
609 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
610 if (!MCE)
611 return false;
612 int64_t Val = MCE->getValue();
613 return (Val >= 1 && Val < 64);
614 }
isImm1_64() const615 bool isImm1_64() const {
616 if (!isImm())
617 return false;
618 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
619 if (!MCE)
620 return false;
621 int64_t Val = MCE->getValue();
622 return (Val >= 1 && Val < 65);
623 }
isImm0_127() const624 bool isImm0_127() const {
625 if (!isImm())
626 return false;
627 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
628 if (!MCE)
629 return false;
630 int64_t Val = MCE->getValue();
631 return (Val >= 0 && Val < 128);
632 }
isImm0_255() const633 bool isImm0_255() const {
634 if (!isImm())
635 return false;
636 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
637 if (!MCE)
638 return false;
639 int64_t Val = MCE->getValue();
640 return (Val >= 0 && Val < 256);
641 }
isImm0_65535() const642 bool isImm0_65535() const {
643 if (!isImm())
644 return false;
645 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
646 if (!MCE)
647 return false;
648 int64_t Val = MCE->getValue();
649 return (Val >= 0 && Val < 65536);
650 }
isImm32_63() const651 bool isImm32_63() const {
652 if (!isImm())
653 return false;
654 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
655 if (!MCE)
656 return false;
657 int64_t Val = MCE->getValue();
658 return (Val >= 32 && Val < 64);
659 }
  /// 32-bit logical (bitmask) immediate.  The value's upper 32 bits must be
  /// all-zero or all-one (i.e. it fits in 32 bits, allowing sign extension),
  /// and the low 32 bits must encode as an AArch64 bitmask immediate.
  bool isLogicalImm32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    // Reject values whose upper 32 bits are neither all clear nor all set.
    if (Val >> 32 != 0 && Val >> 32 != ~0LL)
      return false;
    Val &= 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  /// 64-bit logical (bitmask) immediate.
  bool isLogicalImm64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
  }
  /// Inverted 32-bit logical immediate (e.g. for BIC-style aliases): the
  /// bitwise NOT of the low 32 bits must be a valid bitmask immediate.
  bool isLogicalImm32Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  /// Inverted 64-bit logical immediate.
  bool isLogicalImm64Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
  }
isShiftedImm() const697 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
isAddSubImm() const698 bool isAddSubImm() const {
699 if (!isShiftedImm() && !isImm())
700 return false;
701
702 const MCExpr *Expr;
703
704 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
705 if (isShiftedImm()) {
706 unsigned Shift = ShiftedImm.ShiftAmount;
707 Expr = ShiftedImm.Val;
708 if (Shift != 0 && Shift != 12)
709 return false;
710 } else {
711 Expr = getImm();
712 }
713
714 AArch64MCExpr::VariantKind ELFRefKind;
715 MCSymbolRefExpr::VariantKind DarwinRefKind;
716 int64_t Addend;
717 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
718 DarwinRefKind, Addend)) {
719 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
720 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
721 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
722 || ELFRefKind == AArch64MCExpr::VK_LO12
723 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
724 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
725 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
726 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
727 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
728 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
729 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
730 }
731
732 // Otherwise it should be a real immediate in range:
733 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
734 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
735 }
isAddSubImmNeg() const736 bool isAddSubImmNeg() const {
737 if (!isShiftedImm() && !isImm())
738 return false;
739
740 const MCExpr *Expr;
741
742 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
743 if (isShiftedImm()) {
744 unsigned Shift = ShiftedImm.ShiftAmount;
745 Expr = ShiftedImm.Val;
746 if (Shift != 0 && Shift != 12)
747 return false;
748 } else
749 Expr = getImm();
750
751 // Otherwise it should be a real negative immediate in range:
752 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
753 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
754 }
  bool isCondCode() const { return Kind == k_CondCode; }
  /// Constant valid as an AdvSIMD "type 10" modified immediate (see
  /// AArch64_AM::isAdvSIMDModImmType10).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
  // PC-relative target predicates: non-constant expressions are accepted
  // (returning true) and left for the fixup/relocation machinery; constants
  // must be 4-byte aligned and inside the field's signed byte range.
  /// 26-bit word-aligned branch target (signed 28-bit byte offset).
  bool isBranchTarget26() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
  }
  /// 19-bit word-aligned PC-relative label (signed 21-bit byte offset).
  bool isPCRelLabel19() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
  }
  /// 14-bit word-aligned branch target (signed 16-bit byte offset).
  bool isBranchTarget14() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
  }
797
798 bool
isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const799 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
800 if (!isImm())
801 return false;
802
803 AArch64MCExpr::VariantKind ELFRefKind;
804 MCSymbolRefExpr::VariantKind DarwinRefKind;
805 int64_t Addend;
806 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
807 DarwinRefKind, Addend)) {
808 return false;
809 }
810 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
811 return false;
812
813 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
814 if (ELFRefKind == AllowedModifiers[i])
815 return Addend == 0;
816 }
817
818 return false;
819 }
820
  // MOVZ/MOVK symbol operand predicates: each accepts a symbol reference
  // with one of the listed 16-bit-slice modifiers (G0..G3, plus the TLS and
  // signed/NC variants appropriate to that instruction); isMovWSymbol
  // enforces the zero-addend requirement.

  bool isMovZSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovZSymbolG2() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
                         AArch64MCExpr::VK_TPREL_G2,
                         AArch64MCExpr::VK_DTPREL_G2});
  }

  bool isMovZSymbolG1() const {
    return isMovWSymbol({
        AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    });
  }

  bool isMovZSymbolG0() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
                         AArch64MCExpr::VK_TPREL_G0,
                         AArch64MCExpr::VK_DTPREL_G0});
  }

  bool isMovKSymbolG3() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
  }

  bool isMovKSymbolG2() const {
    return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
  }

  bool isMovKSymbolG1() const {
    return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
                         AArch64MCExpr::VK_TPREL_G1_NC,
                         AArch64MCExpr::VK_DTPREL_G1_NC});
  }

  bool isMovKSymbolG0() const {
    return isMovWSymbol(
        {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
         AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
  }
864
865 template<int RegWidth, int Shift>
isMOVZMovAlias() const866 bool isMOVZMovAlias() const {
867 if (!isImm()) return false;
868
869 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
870 if (!CE) return false;
871 uint64_t Value = CE->getValue();
872
873 if (RegWidth == 32)
874 Value &= 0xffffffffULL;
875
876 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
877 if (Value == 0 && Shift != 0)
878 return false;
879
880 return (Value & ~(0xffffULL << Shift)) == 0;
881 }
882
883 template<int RegWidth, int Shift>
isMOVNMovAlias() const884 bool isMOVNMovAlias() const {
885 if (!isImm()) return false;
886
887 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
888 if (!CE) return false;
889 uint64_t Value = CE->getValue();
890
891 // MOVZ takes precedence over MOVN.
892 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
893 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
894 return false;
895
896 Value = ~Value;
897 if (RegWidth == 32)
898 Value &= 0xffffffffULL;
899
900 return (Value & ~(0xffffULL << Shift)) == 0;
901 }
902
  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  // System registers carry independent MRS (read) and MSR (write)
  // encodings; -1U marks "not valid in that direction".
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    return SysReg.MRSReg != -1U;
  }
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;
    return SysReg.MSRReg != -1U;
  }
  // PAN and UAO take a 0/1 immediate; any other named PState field takes
  // an immediate in [0, 15].
  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg()) return false;
    return (SysReg.PStateField == AArch64PState::PAN ||
            SysReg.PStateField == AArch64PState::UAO);
  }
  bool isSystemPStateFieldWithImm0_15() const {
    if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    return SysReg.PStateField != -1U;
  }
  // Register predicates: scalar vs. vector is a stored flag, and class
  // membership is checked against the generated register class tables.
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
  bool isGPR32as64() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }
  bool isWSeqPair() const {
    return Kind == k_Register && !Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
  bool isXSeqPair() const {
    return Kind == k_Register && !Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
               Reg.RegNum);
  }
945
  /// 64-bit GPR including SP (the GPR64sp class).
  bool isGPR64sp0() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
  }

  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }

  /// Vector list with an explicit type: NumRegs registers, each with
  /// NumElements lanes of the element kind given by ElementKind.
  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }
968
  /// Lane index must be exactly 1.
  bool isVectorIndex1() const {
    return Kind == k_VectorIndex && VectorIndex.Val == 1;
  }
  // Lane-index bounds per element size: B < 16, H < 8, S < 4, D < 2.
  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
  }
  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
  }
  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
  }
  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
  }
  bool isToken() const override { return Kind == k_Token; }
  /// Token operand whose spelling matches Str exactly.
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  /// True for any shift operator (LSL/LSR/ASR/ROR) plus the vector-only MSL.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
  /// True for any register-extend operator (UXTB..SXTX, or LSL, which is
  /// accepted as an extend alias here) with a shift amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1013
  /// Extend valid for instructions taking a 32-bit source register.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }
  /// Extend valid for instructions taking a 64-bit source register:
  /// only UXTX/SXTX (or plain LSL), amount 0..4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
1029
  /// Valid extend for a register-offset memory access with a 64-bit index
  /// register (LSL or SXTX), where the shift is either 0 or log2 of the
  /// access size in bytes (Width is in bits).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  /// Same as isMemXExtend, but for a 32-bit index register (UXTW or SXTW).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
1047
  /// Shifter usable on arithmetic (ADD/SUB) instructions: LSL/LSR/ASR with
  /// an amount strictly less than the register width.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  /// Shifter usable on logical (AND/ORR/EOR/...) instructions: additionally
  /// allows ROR.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1070
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (The original comment listed 32/48 here; those amounts belong to the
    // 64-bit variant below — the code has always accepted only 0 and 16.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
1082
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 — one amount per
    // 16-bit halfword of the 64-bit register. (The original comment said
    // "0 or 16", which describes the 32-bit variant above.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1094
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector halfword shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL ("masked shift left") by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1124
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm9() && !isUImm12Offset<Width / 8>();
  }
1134
  /// ADRP target: a symbolic expression, or a constant that is 4K-aligned
  /// and within +/-4GB (21-bit page offset scaled by 4096).
  bool isAdrpLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (4096 * (1LL << (21 - 1)));
      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
    }

    return true;
  }

  /// ADR target: a symbolic expression, or a constant within the signed
  /// 21-bit byte range.
  bool isAdrLabel() const {
    // Validation was handled during parsing, so we just sanity check that
    // something didn't go haywire.
    if (!isImm())
      return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (1LL << (21 - 1));
      int64_t Max = ((1LL << (21 - 1)) - 1);
      return Val >= Min && Val <= Max;
    }

    return true;
  }
1166
addExpr(MCInst & Inst,const MCExpr * Expr) const1167 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1168 // Add as immediates when possible. Null MCExpr = 0.
1169 if (!Expr)
1170 Inst.addOperand(MCOperand::createImm(0));
1171 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1172 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1173 else
1174 Inst.addOperand(MCOperand::createExpr(Expr));
1175 }
1176
  /// Append the register as-is.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// The operand was parsed as a 64-bit GPR but the instruction wants the
  /// 32-bit W register with the same encoding; remap via the encoding value.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::createReg(Reg));
  }

  /// Vector registers are parsed as Q registers; convert to the D register
  /// with the same index for 64-bit vector operands.
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1212
  /// Vector lists are stored with a Q-tuple start register; translate to the
  /// equivalent D-register tuple of the same length and start index.
  template <unsigned NumRegs>
  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Indexed by NumRegs - 1: base tuple register for each list length.
    static const unsigned FirstRegs[] = { AArch64::D0,
                                          AArch64::D0_D1,
                                          AArch64::D0_D1_D2,
                                          AArch64::D0_D1_D2_D3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
  }

  template <unsigned NumRegs>
  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    static const unsigned FirstRegs[] = { AArch64::Q0,
                                          AArch64::Q0_Q1,
                                          AArch64::Q0_Q1_Q2,
                                          AArch64::Q0_Q1_Q2_Q3 };
    unsigned FirstReg = FirstRegs[NumRegs - 1];

    Inst.addOperand(
        MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
  }
1238
  // Vector lane indices are emitted as plain immediates; the range was
  // already validated by the matching isVectorIndex* predicate.
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
1263
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    addExpr(Inst, getImm());
  }

  /// ADD/SUB immediate: emits the value plus a shift operand (0 or the
  /// parsed "lsl #N" amount).
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
    }
  }

  /// Negated ADD/SUB immediate (used when e.g. "sub #-x" is encoded as
  /// "add #x"). The cast is safe: the matcher only accepts constants here.
  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
    int64_t Val = -CE->getValue();
    unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;

    Inst.addOperand(MCOperand::createImm(Val));
    Inst.addOperand(MCOperand::createImm(ShiftAmt));
  }
1294
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCondCode()));
  }

  /// ADRP encodes a page delta, so constant targets are scaled down by the
  /// 4K page size; symbolic targets are emitted unchanged for the fixup.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1312
  /// Scaled unsigned 12-bit offset: constant values are divided by the
  /// access size (Scale bytes); symbolic values are left for the fixup.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
  }

  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  // Signed 7-bit offsets for load/store-pair, scaled by the access size.
  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
  }

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
  }

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
  }
1348
  // The addImm<lo>_<hi>Operands helpers below are all identical: the range
  // was already checked by the corresponding isImm<lo>_<hi> predicate during
  // matching, so the constant is simply appended as an immediate. The cast
  // is safe for the same reason (only constants match these classes).
  void addImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }

  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(MCE->getValue()));
  }
1439
  // Logical (bitmask) immediates are stored in the instruction in the
  // N:immr:imms encoded form, so encode here rather than at print time.
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  // "Not" variants encode the bitwise complement (used by aliases such as
  // BIC-immediate spelled via AND).
  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
    Inst.addOperand(MCOperand::createImm(encoding));
  }

  /// FMOV-style 64-bit vector modified-immediate (abcdefgh pattern).
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::createImm(encoding));
  }
1477
  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    // Note: trivially true after the early return above; kept as belt and
    // braces.
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    // Branch operands don't encode the low bits, so shift them off
    // here. If it's a label, however, just put it on directly as there's
    // not enough information now to do anything.
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE) {
      addExpr(Inst, getImm());
      return;
    }
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
  }
1519
  // The helpers below emit an already-validated encoding stored on the
  // operand (FP immediate index, barrier option, system register, etc.)
  // as a plain immediate.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getFPImm()));
  }

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getBarrier()));
  }

  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
  }

  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
  }

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPrefetch()));
  }

  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getPSBHint()));
  }
1568
  /// Pack shift type + amount into the single-immediate shifter encoding.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  /// Arithmetic extend for a 32-bit source: a bare LSL is an alias for UXTW.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  /// Arithmetic extend for a 64-bit source: a bare LSL is an alias for UXTX.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  /// Register-offset memory extend: emits (is-signed, has-nonzero-shift).
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
  }

  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::createImm(IsSigned));
    Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
  }
1611
  /// MOV-immediate alias lowered to MOVZ: extract the 16-bit field at the
  /// given shift position from the full value.
  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
  }

  /// MOV-immediate alias lowered to MOVN: same, but on the complement.
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
  }
1629
1630 void print(raw_ostream &OS) const override;
1631
  /// Build a token operand. The Str data is not copied, so it must outlive
  /// the operand (it points into the parsed assembly buffer).
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Build a register operand; isVector distinguishes V-registers from GPRs.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.isVector = isVector;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1652
  /// Build a vector-list operand ({v0.4s-v3.4s} etc.); RegNum is the Q
  /// tuple start register, ElementKind is 0 when the type is implicit.
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementKind = ElementKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Build a vector lane-index operand (the [N] suffix).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Build an immediate operand wrapping an arbitrary MCExpr.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1683
CreateShiftedImm(const MCExpr * Val,unsigned ShiftAmount,SMLoc S,SMLoc E,MCContext & Ctx)1684 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1685 unsigned ShiftAmount,
1686 SMLoc S, SMLoc E,
1687 MCContext &Ctx) {
1688 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1689 Op->ShiftedImm .Val = Val;
1690 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1691 Op->StartLoc = S;
1692 Op->EndLoc = E;
1693 return Op;
1694 }
1695
  /// Build a condition-code operand (the ".eq"/".ne"/... suffix or operand).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Build an FP-immediate operand; Val is the 8-bit FMOV encoding, not the
  /// floating-point value itself.
  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
                                                     MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Build a barrier-option operand (DSB/DMB/ISB). Str is kept for printing;
  /// the data is not copied and must outlive the operand.
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       StringRef Str,
                                                       SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1726
  /// Build a system-register operand. The three encodings cover the MRS
  /// (read), MSR (write) and pstate-field uses of the same spelling; the
  /// matcher picks the relevant one.
  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t MRSReg,
                                                      uint32_t MSRReg,
                                                      uint32_t PStateField,
                                                      MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Build a system control-register field operand (the Cn/Cm of SYS/SYSL).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1751
CreatePrefetch(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)1752 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1753 StringRef Str,
1754 SMLoc S,
1755 MCContext &Ctx) {
1756 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1757 Op->Prefetch.Val = Val;
1758 Op->Barrier.Data = Str.data();
1759 Op->Barrier.Length = Str.size();
1760 Op->StartLoc = S;
1761 Op->EndLoc = S;
1762 return Op;
1763 }
1764
CreatePSBHint(unsigned Val,StringRef Str,SMLoc S,MCContext & Ctx)1765 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1766 StringRef Str,
1767 SMLoc S,
1768 MCContext &Ctx) {
1769 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1770 Op->PSBHint.Val = Val;
1771 Op->PSBHint.Data = Str.data();
1772 Op->PSBHint.Length = Str.size();
1773 Op->StartLoc = S;
1774 Op->EndLoc = S;
1775 return Op;
1776 }
1777
1778 static std::unique_ptr<AArch64Operand>
CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp,unsigned Val,bool HasExplicitAmount,SMLoc S,SMLoc E,MCContext & Ctx)1779 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1780 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1781 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1782 Op->ShiftExtend.Type = ShOp;
1783 Op->ShiftExtend.Amount = Val;
1784 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1785 Op->StartLoc = S;
1786 Op->EndLoc = E;
1787 return Op;
1788 }
1789 };
1790
1791 } // end anonymous namespace.
1792
/// Dump a human-readable description of the operand to \p OS (debug aid).
/// Each operand kind uses its own ad-hoc "<kind ...>" format.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Raw 8-bit encoding followed by the decoded floating-point value.
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    StringRef Name = getBarrierName();
    if (!Name.empty())
      OS << "<barrier " << Name << ">";
    else
      // This encoding has no symbolic name; print the raw value.
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    OS << *getImm();
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << *getShiftedImmVal();
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    // List registers are numbered consecutively from the start register.
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    StringRef Name = getPrefetchName();
    if (!Name.empty())
      OS << "<prfop " << Name << ">";
    else
      // This encoding has no symbolic name; print the raw value.
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_PSBHint: {
    OS << getPSBHintName();
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    // Tag extends whose amount was implicit (no "#N" in the source).
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
  }
}
1865
1866 /// @name Auto-generated Match Functions
1867 /// {
1868
1869 static unsigned MatchRegisterName(StringRef Name);
1870
1871 /// }
1872
matchVectorRegName(StringRef Name)1873 static unsigned matchVectorRegName(StringRef Name) {
1874 return StringSwitch<unsigned>(Name.lower())
1875 .Case("v0", AArch64::Q0)
1876 .Case("v1", AArch64::Q1)
1877 .Case("v2", AArch64::Q2)
1878 .Case("v3", AArch64::Q3)
1879 .Case("v4", AArch64::Q4)
1880 .Case("v5", AArch64::Q5)
1881 .Case("v6", AArch64::Q6)
1882 .Case("v7", AArch64::Q7)
1883 .Case("v8", AArch64::Q8)
1884 .Case("v9", AArch64::Q9)
1885 .Case("v10", AArch64::Q10)
1886 .Case("v11", AArch64::Q11)
1887 .Case("v12", AArch64::Q12)
1888 .Case("v13", AArch64::Q13)
1889 .Case("v14", AArch64::Q14)
1890 .Case("v15", AArch64::Q15)
1891 .Case("v16", AArch64::Q16)
1892 .Case("v17", AArch64::Q17)
1893 .Case("v18", AArch64::Q18)
1894 .Case("v19", AArch64::Q19)
1895 .Case("v20", AArch64::Q20)
1896 .Case("v21", AArch64::Q21)
1897 .Case("v22", AArch64::Q22)
1898 .Case("v23", AArch64::Q23)
1899 .Case("v24", AArch64::Q24)
1900 .Case("v25", AArch64::Q25)
1901 .Case("v26", AArch64::Q26)
1902 .Case("v27", AArch64::Q27)
1903 .Case("v28", AArch64::Q28)
1904 .Case("v29", AArch64::Q29)
1905 .Case("v30", AArch64::Q30)
1906 .Case("v31", AArch64::Q31)
1907 .Default(0);
1908 }
1909
isValidVectorKind(StringRef Name)1910 static bool isValidVectorKind(StringRef Name) {
1911 return StringSwitch<bool>(Name.lower())
1912 .Case(".8b", true)
1913 .Case(".16b", true)
1914 .Case(".4h", true)
1915 .Case(".8h", true)
1916 .Case(".2s", true)
1917 .Case(".4s", true)
1918 .Case(".1d", true)
1919 .Case(".2d", true)
1920 .Case(".1q", true)
1921 // Accept the width neutral ones, too, for verbose syntax. If those
1922 // aren't used in the right places, the token operand won't match so
1923 // all will work out.
1924 .Case(".b", true)
1925 .Case(".h", true)
1926 .Case(".s", true)
1927 .Case(".d", true)
1928 // Needed for fp16 scalar pairwise reductions
1929 .Case(".2h", true)
1930 .Default(false);
1931 }
1932
parseValidVectorKind(StringRef Name,unsigned & NumElements,char & ElementKind)1933 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1934 char &ElementKind) {
1935 assert(isValidVectorKind(Name));
1936
1937 ElementKind = Name.lower()[Name.size() - 1];
1938 NumElements = 0;
1939
1940 if (Name.size() == 2)
1941 return;
1942
1943 // Parse the lane count
1944 Name = Name.drop_front();
1945 while (isdigit(Name.front())) {
1946 NumElements = 10 * NumElements + (Name.front() - '0');
1947 Name = Name.drop_front();
1948 }
1949 }
1950
/// Generic MC-layer entry point for register parsing. Returns true on
/// failure, with RegNo set to (unsigned)-1 by tryParseRegister.
/// NOTE(review): ErrorCode is never written here, so callers observe its
/// previous value on failure — confirm this matches the keystone error
/// reporting convention.
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                     SMLoc &EndLoc, unsigned int &ErrorCode) {
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  // tryParseRegister has eaten the token; point EndLoc at its last char.
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return (RegNo == (unsigned)-1);
}
1958
1959 // Matches a register name or register alias previously defined by '.req'
matchRegisterNameAlias(StringRef Name,bool isVector)1960 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1961 bool isVector) {
1962 unsigned RegNum = isVector ? matchVectorRegName(Name)
1963 : MatchRegisterName(Name);
1964
1965 if (RegNum == 0) {
1966 // Check for aliases registered via .req. Canonicalize to lower case.
1967 // That's more consistent since register names are case insensitive, and
1968 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1969 auto Entry = RegisterReqs.find(Name.lower());
1970 if (Entry == RegisterReqs.end())
1971 return 0;
1972 // set RegNum if the match is the right kind of register
1973 if (isVector == Entry->getValue().first)
1974 RegNum = Entry->getValue().second;
1975 }
1976 return RegNum;
1977 }
1978
1979 /// tryParseRegister - Try to parse a register name. The token must be an
1980 /// Identifier when called, and if it is a register name the token is eaten and
1981 /// the register is added to the operand list.
tryParseRegister()1982 int AArch64AsmParser::tryParseRegister() {
1983 MCAsmParser &Parser = getParser();
1984 const AsmToken &Tok = Parser.getTok();
1985 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1986
1987 std::string lowerCase = Tok.getString().lower();
1988 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1989 // Also handle a few aliases of registers.
1990 if (RegNum == 0)
1991 RegNum = StringSwitch<unsigned>(lowerCase)
1992 .Case("fp", AArch64::FP)
1993 .Case("lr", AArch64::LR)
1994 .Case("x31", AArch64::XZR)
1995 .Case("w31", AArch64::WZR)
1996 .Default(0);
1997
1998 if (RegNum == 0)
1999 return -1;
2000
2001 Parser.Lex(); // Eat identifier token.
2002 return RegNum;
2003 }
2004
2005 /// tryMatchVectorRegister - Try to parse a vector register name with optional
2006 /// kind specifier. If it is a register specifier, eat the token and return it.
tryMatchVectorRegister(StringRef & Kind,bool expected)2007 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected)
2008 {
2009 MCAsmParser &Parser = getParser();
2010 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2011 //TokError("vector register expected");
2012 return -1;
2013 }
2014
2015 StringRef Name = Parser.getTok().getString();
2016 // If there is a kind specifier, it's separated from the register name by
2017 // a '.'.
2018 size_t Start = 0, Next = Name.find('.');
2019 StringRef Head = Name.slice(Start, Next);
2020 unsigned RegNum = matchRegisterNameAlias(Head, true);
2021
2022 if (RegNum) {
2023 if (Next != StringRef::npos) {
2024 Kind = Name.slice(Next, StringRef::npos);
2025 if (!isValidVectorKind(Kind)) {
2026 //TokError("invalid vector kind qualifier");
2027 return -1;
2028 }
2029 }
2030 Parser.Lex(); // Eat the register token.
2031 return RegNum;
2032 }
2033
2034 //if (expected)
2035 // TokError("vector register expected");
2036 return -1;
2037 }
2038
2039 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2040 AArch64AsmParser::OperandMatchResultTy
tryParseSysCROperand(OperandVector & Operands)2041 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands)
2042 {
2043 MCAsmParser &Parser = getParser();
2044 SMLoc S = getLoc();
2045
2046 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2047 //Error(S, "Expected cN operand where 0 <= N <= 15");
2048 return MatchOperand_ParseFail;
2049 }
2050
2051 StringRef Tok = Parser.getTok().getIdentifier();
2052 if (Tok[0] != 'c' && Tok[0] != 'C') {
2053 //Error(S, "Expected cN operand where 0 <= N <= 15");
2054 return MatchOperand_ParseFail;
2055 }
2056
2057 uint32_t CRNum;
2058 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2059 if (BadNum || CRNum > 15) {
2060 //Error(S, "Expected cN operand where 0 <= N <= 15");
2061 return MatchOperand_ParseFail;
2062 }
2063
2064 Parser.Lex(); // Eat identifier token.
2065 Operands.push_back(
2066 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2067 return MatchOperand_Success;
2068 }
2069
2070 /// tryParsePrefetch - Try to parse a prefetch operand.
2071 AArch64AsmParser::OperandMatchResultTy
tryParsePrefetch(OperandVector & Operands)2072 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2073 MCAsmParser &Parser = getParser();
2074 SMLoc S = getLoc();
2075 const AsmToken &Tok = Parser.getTok();
2076 // Either an identifier for named values or a 5-bit immediate.
2077 bool Hash = Tok.is(AsmToken::Hash);
2078 if (Hash || Tok.is(AsmToken::Integer)) {
2079 if (Hash)
2080 Parser.Lex(); // Eat hash token.
2081 const MCExpr *ImmVal;
2082 if (getParser().parseExpression(ImmVal))
2083 return MatchOperand_ParseFail;
2084
2085 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2086 if (!MCE) {
2087 //TokError("immediate value expected for prefetch operand");
2088 return MatchOperand_ParseFail;
2089 }
2090 unsigned prfop = MCE->getValue();
2091 if (prfop > 31) {
2092 //TokError("prefetch operand out of range, [0,31] expected");
2093 return MatchOperand_ParseFail;
2094 }
2095
2096 bool Valid;
2097 auto Mapper = AArch64PRFM::PRFMMapper();
2098 StringRef Name =
2099 Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
2100 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
2101 S, getContext()));
2102 return MatchOperand_Success;
2103 }
2104
2105 if (Tok.isNot(AsmToken::Identifier)) {
2106 //TokError("pre-fetch hint expected");
2107 return MatchOperand_ParseFail;
2108 }
2109
2110 bool Valid;
2111 auto Mapper = AArch64PRFM::PRFMMapper();
2112 unsigned prfop =
2113 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2114 if (!Valid) {
2115 //TokError("pre-fetch hint expected");
2116 return MatchOperand_ParseFail;
2117 }
2118
2119 Parser.Lex(); // Eat identifier token.
2120 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2121 S, getContext()));
2122 return MatchOperand_Success;
2123 }
2124
2125 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2126 AArch64AsmParser::OperandMatchResultTy
tryParsePSBHint(OperandVector & Operands)2127 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2128 MCAsmParser &Parser = getParser();
2129 SMLoc S = getLoc();
2130 const AsmToken &Tok = Parser.getTok();
2131 if (Tok.isNot(AsmToken::Identifier)) {
2132 //TokError("invalid operand for instruction");
2133 return MatchOperand_ParseFail;
2134 }
2135
2136 bool Valid;
2137 auto Mapper = AArch64PSBHint::PSBHintMapper();
2138 unsigned psbhint =
2139 Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
2140 if (!Valid) {
2141 //TokError("invalid operand for instruction");
2142 return MatchOperand_ParseFail;
2143 }
2144
2145 Parser.Lex(); // Eat identifier token.
2146 Operands.push_back(AArch64Operand::CreatePSBHint(psbhint, Tok.getString(),
2147 S, getContext()));
2148 return MatchOperand_Success;
2149 }
2150
2151 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2152 /// instruction.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const MCExpr *Expr;

  // An optional '#' may precede the label expression.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify the reference and validate it against the modifiers ADRP can
  // actually relocate (page-granular kinds only).
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
        AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // gotpage label reference not allowed an addend.
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2201
2202 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2203 /// instruction.
2204 AArch64AsmParser::OperandMatchResultTy
tryParseAdrLabel(OperandVector & Operands)2205 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2206 MCAsmParser &Parser = getParser();
2207 SMLoc S = getLoc();
2208 const MCExpr *Expr;
2209
2210 if (Parser.getTok().is(AsmToken::Hash)) {
2211 Parser.Lex(); // Eat hash token.
2212 }
2213
2214 if (getParser().parseExpression(Expr))
2215 return MatchOperand_ParseFail;
2216
2217 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2218 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2219
2220 return MatchOperand_Success;
2221 }
2222
2223 /// tryParseFPImm - A floating point immediate expression operand.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  // Remember whether a '#' was seen: without it, a non-FP token means
  // "no match" (let other parsers try) rather than a hard failure.
  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    // Real literal: encode as an 8-bit AArch64 FP immediate.
    // getFP64Imm returns -1 when the value is not representable.
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    if (isNegative)
      RealVal.changeSign();

    if (RealVal.bitcastToAPInt().getActiveBits() > 64)
      return MatchOperand_ParseFail;
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isPosZero()) {
      //TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
    if (!isNegative && Tok.getString().startswith("0x")) {
      // Hex literal: taken as the raw 8-bit encoding itself.
      bool valid;
      Val = Tok.getIntVal(valid);
      if (!valid)
        return MatchOperand_ParseFail;
      if (Val > 255 || Val < 0) {
        //TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      // Decimal integer: treated as a real value (e.g. "#1" means 1.0).
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      if (RealVal.bitcastToAPInt().getActiveBits() > 64)
        return MatchOperand_ParseFail;
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  if (!Hash)
    return MatchOperand_NoMatch;

  //TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
2293
2294 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift follows. For a plain constant that is too wide for
    // the 12-bit field but page-aligned, fold it to "imm>>12, lsl #12".
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    if (MCE) {
      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::create(Val >> 12, getContext());
        ShiftAmount = 12;
      }
    }
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                        getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    //Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex();
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    //Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  bool valid;
  int64_t ShiftAmount = Parser.getTok().getIntVal(valid);
  if (!valid)
    return MatchOperand_ParseFail;

  if (ShiftAmount < 0) {
    //Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2363
2364 /// parseCondCodeString - Parse a Condition Code string.
parseCondCodeString(StringRef Cond)2365 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2366 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2367 .Case("eq", AArch64CC::EQ)
2368 .Case("ne", AArch64CC::NE)
2369 .Case("cs", AArch64CC::HS)
2370 .Case("hs", AArch64CC::HS)
2371 .Case("cc", AArch64CC::LO)
2372 .Case("lo", AArch64CC::LO)
2373 .Case("mi", AArch64CC::MI)
2374 .Case("pl", AArch64CC::PL)
2375 .Case("vs", AArch64CC::VS)
2376 .Case("vc", AArch64CC::VC)
2377 .Case("hi", AArch64CC::HI)
2378 .Case("ls", AArch64CC::LS)
2379 .Case("ge", AArch64CC::GE)
2380 .Case("lt", AArch64CC::LT)
2381 .Case("gt", AArch64CC::GT)
2382 .Case("le", AArch64CC::LE)
2383 .Case("al", AArch64CC::AL)
2384 .Case("nv", AArch64CC::NV)
2385 .Default(AArch64CC::Invalid);
2386 return CC;
2387 }
2388
2389 /// parseCondCode - Parse a Condition Code operand.
parseCondCode(OperandVector & Operands,bool invertCondCode)2390 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2391 bool invertCondCode) {
2392 MCAsmParser &Parser = getParser();
2393 SMLoc S = getLoc();
2394 const AsmToken &Tok = Parser.getTok();
2395 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2396
2397 StringRef Cond = Tok.getString();
2398 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2399 if (CC == AArch64CC::Invalid)
2400 //return TokError("invalid condition code");
2401 return true;
2402 Parser.Lex(); // Eat identifier token.
2403
2404 if (invertCondCode) {
2405 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2406 //return TokError("condition codes AL and NV are invalid for this instruction");
2407 return true;
2408 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2409 }
2410
2411 Operands.push_back(
2412 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2413 return false;
2414 }
2415
2416 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2417 /// them if present.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // Identify the shift/extend mnemonic, case-insensitively.
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      //TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  if (Hash)
    Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number or a parenthesized expression.
  SMLoc E = Parser.getTok().getLoc();
  if (!Parser.getTok().is(AsmToken::Integer) &&
      !Parser.getTok().is(AsmToken::LParen)) {
    //Error(E, "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a compile-time constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    //Error(E, "expected constant '#imm' after shift specifier");
    return MatchOperand_ParseFail;
  }

  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2489
2490 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2491 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
parseSysAlias(StringRef Name,SMLoc NameLoc,OperandVector & Operands)2492 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2493 OperandVector &Operands)
2494 {
2495 if (Name.find('.') != StringRef::npos)
2496 //return TokError("invalid operand");
2497 return true;
2498
2499 Mnemonic = Name;
2500 Operands.push_back(
2501 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2502
2503 MCAsmParser &Parser = getParser();
2504 const AsmToken &Tok = Parser.getTok();
2505 StringRef Op = Tok.getString();
2506 SMLoc S = Tok.getLoc();
2507
2508 const MCExpr *Expr = nullptr;
2509
2510 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2511 do { \
2512 Expr = MCConstantExpr::create(op1, getContext()); \
2513 Operands.push_back( \
2514 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2515 Operands.push_back( \
2516 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2517 Operands.push_back( \
2518 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2519 Expr = MCConstantExpr::create(op2, getContext()); \
2520 Operands.push_back( \
2521 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2522 } while (0)
2523
2524 if (Mnemonic == "ic") {
2525 if (!Op.compare_lower("ialluis")) {
2526 // SYS #0, C7, C1, #0
2527 SYS_ALIAS(0, 7, 1, 0);
2528 } else if (!Op.compare_lower("iallu")) {
2529 // SYS #0, C7, C5, #0
2530 SYS_ALIAS(0, 7, 5, 0);
2531 } else if (!Op.compare_lower("ivau")) {
2532 // SYS #3, C7, C5, #1
2533 SYS_ALIAS(3, 7, 5, 1);
2534 } else {
2535 //return TokError("invalid operand for IC instruction");
2536 return true;
2537 }
2538 } else if (Mnemonic == "dc") {
2539 if (!Op.compare_lower("zva")) {
2540 // SYS #3, C7, C4, #1
2541 SYS_ALIAS(3, 7, 4, 1);
2542 } else if (!Op.compare_lower("ivac")) {
2543 // SYS #3, C7, C6, #1
2544 SYS_ALIAS(0, 7, 6, 1);
2545 } else if (!Op.compare_lower("isw")) {
2546 // SYS #0, C7, C6, #2
2547 SYS_ALIAS(0, 7, 6, 2);
2548 } else if (!Op.compare_lower("cvac")) {
2549 // SYS #3, C7, C10, #1
2550 SYS_ALIAS(3, 7, 10, 1);
2551 } else if (!Op.compare_lower("csw")) {
2552 // SYS #0, C7, C10, #2
2553 SYS_ALIAS(0, 7, 10, 2);
2554 } else if (!Op.compare_lower("cvau")) {
2555 // SYS #3, C7, C11, #1
2556 SYS_ALIAS(3, 7, 11, 1);
2557 } else if (!Op.compare_lower("civac")) {
2558 // SYS #3, C7, C14, #1
2559 SYS_ALIAS(3, 7, 14, 1);
2560 } else if (!Op.compare_lower("cisw")) {
2561 // SYS #0, C7, C14, #2
2562 SYS_ALIAS(0, 7, 14, 2);
2563 } else if (!Op.compare_lower("cvap")) {
2564 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2565 // SYS #3, C7, C12, #1
2566 SYS_ALIAS(3, 7, 12, 1);
2567 } else {
2568 //return TokError("DC CVAP requires ARMv8.2a");
2569 return true;
2570 }
2571 } else {
2572 //return TokError("invalid operand for DC instruction");
2573 return true;
2574 }
2575 } else if (Mnemonic == "at") {
2576 if (!Op.compare_lower("s1e1r")) {
2577 // SYS #0, C7, C8, #0
2578 SYS_ALIAS(0, 7, 8, 0);
2579 } else if (!Op.compare_lower("s1e2r")) {
2580 // SYS #4, C7, C8, #0
2581 SYS_ALIAS(4, 7, 8, 0);
2582 } else if (!Op.compare_lower("s1e3r")) {
2583 // SYS #6, C7, C8, #0
2584 SYS_ALIAS(6, 7, 8, 0);
2585 } else if (!Op.compare_lower("s1e1w")) {
2586 // SYS #0, C7, C8, #1
2587 SYS_ALIAS(0, 7, 8, 1);
2588 } else if (!Op.compare_lower("s1e2w")) {
2589 // SYS #4, C7, C8, #1
2590 SYS_ALIAS(4, 7, 8, 1);
2591 } else if (!Op.compare_lower("s1e3w")) {
2592 // SYS #6, C7, C8, #1
2593 SYS_ALIAS(6, 7, 8, 1);
2594 } else if (!Op.compare_lower("s1e0r")) {
2595 // SYS #0, C7, C8, #3
2596 SYS_ALIAS(0, 7, 8, 2);
2597 } else if (!Op.compare_lower("s1e0w")) {
2598 // SYS #0, C7, C8, #3
2599 SYS_ALIAS(0, 7, 8, 3);
2600 } else if (!Op.compare_lower("s12e1r")) {
2601 // SYS #4, C7, C8, #4
2602 SYS_ALIAS(4, 7, 8, 4);
2603 } else if (!Op.compare_lower("s12e1w")) {
2604 // SYS #4, C7, C8, #5
2605 SYS_ALIAS(4, 7, 8, 5);
2606 } else if (!Op.compare_lower("s12e0r")) {
2607 // SYS #4, C7, C8, #6
2608 SYS_ALIAS(4, 7, 8, 6);
2609 } else if (!Op.compare_lower("s12e0w")) {
2610 // SYS #4, C7, C8, #7
2611 SYS_ALIAS(4, 7, 8, 7);
2612 } else if (!Op.compare_lower("s1e1rp")) {
2613 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2614 // SYS #0, C7, C9, #0
2615 SYS_ALIAS(0, 7, 9, 0);
2616 } else {
2617 //return TokError("AT S1E1RP requires ARMv8.2a");
2618 return true;
2619 }
2620 } else if (!Op.compare_lower("s1e1wp")) {
2621 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2622 // SYS #0, C7, C9, #1
2623 SYS_ALIAS(0, 7, 9, 1);
2624 } else {
2625 //return TokError("AT S1E1WP requires ARMv8.2a");
2626 return true;
2627 }
2628 } else {
2629 //return TokError("invalid operand for AT instruction");
2630 return true;
2631 }
2632 } else if (Mnemonic == "tlbi") {
2633 if (!Op.compare_lower("vmalle1is")) {
2634 // SYS #0, C8, C3, #0
2635 SYS_ALIAS(0, 8, 3, 0);
2636 } else if (!Op.compare_lower("alle2is")) {
2637 // SYS #4, C8, C3, #0
2638 SYS_ALIAS(4, 8, 3, 0);
2639 } else if (!Op.compare_lower("alle3is")) {
2640 // SYS #6, C8, C3, #0
2641 SYS_ALIAS(6, 8, 3, 0);
2642 } else if (!Op.compare_lower("vae1is")) {
2643 // SYS #0, C8, C3, #1
2644 SYS_ALIAS(0, 8, 3, 1);
2645 } else if (!Op.compare_lower("vae2is")) {
2646 // SYS #4, C8, C3, #1
2647 SYS_ALIAS(4, 8, 3, 1);
2648 } else if (!Op.compare_lower("vae3is")) {
2649 // SYS #6, C8, C3, #1
2650 SYS_ALIAS(6, 8, 3, 1);
2651 } else if (!Op.compare_lower("aside1is")) {
2652 // SYS #0, C8, C3, #2
2653 SYS_ALIAS(0, 8, 3, 2);
2654 } else if (!Op.compare_lower("vaae1is")) {
2655 // SYS #0, C8, C3, #3
2656 SYS_ALIAS(0, 8, 3, 3);
2657 } else if (!Op.compare_lower("alle1is")) {
2658 // SYS #4, C8, C3, #4
2659 SYS_ALIAS(4, 8, 3, 4);
2660 } else if (!Op.compare_lower("vale1is")) {
2661 // SYS #0, C8, C3, #5
2662 SYS_ALIAS(0, 8, 3, 5);
2663 } else if (!Op.compare_lower("vaale1is")) {
2664 // SYS #0, C8, C3, #7
2665 SYS_ALIAS(0, 8, 3, 7);
2666 } else if (!Op.compare_lower("vmalle1")) {
2667 // SYS #0, C8, C7, #0
2668 SYS_ALIAS(0, 8, 7, 0);
2669 } else if (!Op.compare_lower("alle2")) {
2670 // SYS #4, C8, C7, #0
2671 SYS_ALIAS(4, 8, 7, 0);
2672 } else if (!Op.compare_lower("vale2is")) {
2673 // SYS #4, C8, C3, #5
2674 SYS_ALIAS(4, 8, 3, 5);
2675 } else if (!Op.compare_lower("vale3is")) {
2676 // SYS #6, C8, C3, #5
2677 SYS_ALIAS(6, 8, 3, 5);
2678 } else if (!Op.compare_lower("alle3")) {
2679 // SYS #6, C8, C7, #0
2680 SYS_ALIAS(6, 8, 7, 0);
2681 } else if (!Op.compare_lower("vae1")) {
2682 // SYS #0, C8, C7, #1
2683 SYS_ALIAS(0, 8, 7, 1);
2684 } else if (!Op.compare_lower("vae2")) {
2685 // SYS #4, C8, C7, #1
2686 SYS_ALIAS(4, 8, 7, 1);
2687 } else if (!Op.compare_lower("vae3")) {
2688 // SYS #6, C8, C7, #1
2689 SYS_ALIAS(6, 8, 7, 1);
2690 } else if (!Op.compare_lower("aside1")) {
2691 // SYS #0, C8, C7, #2
2692 SYS_ALIAS(0, 8, 7, 2);
2693 } else if (!Op.compare_lower("vaae1")) {
2694 // SYS #0, C8, C7, #3
2695 SYS_ALIAS(0, 8, 7, 3);
2696 } else if (!Op.compare_lower("alle1")) {
2697 // SYS #4, C8, C7, #4
2698 SYS_ALIAS(4, 8, 7, 4);
2699 } else if (!Op.compare_lower("vale1")) {
2700 // SYS #0, C8, C7, #5
2701 SYS_ALIAS(0, 8, 7, 5);
2702 } else if (!Op.compare_lower("vale2")) {
2703 // SYS #4, C8, C7, #5
2704 SYS_ALIAS(4, 8, 7, 5);
2705 } else if (!Op.compare_lower("vale3")) {
2706 // SYS #6, C8, C7, #5
2707 SYS_ALIAS(6, 8, 7, 5);
2708 } else if (!Op.compare_lower("vaale1")) {
2709 // SYS #0, C8, C7, #7
2710 SYS_ALIAS(0, 8, 7, 7);
2711 } else if (!Op.compare_lower("ipas2e1")) {
2712 // SYS #4, C8, C4, #1
2713 SYS_ALIAS(4, 8, 4, 1);
2714 } else if (!Op.compare_lower("ipas2le1")) {
2715 // SYS #4, C8, C4, #5
2716 SYS_ALIAS(4, 8, 4, 5);
2717 } else if (!Op.compare_lower("ipas2e1is")) {
2718 // SYS #4, C8, C4, #1
2719 SYS_ALIAS(4, 8, 0, 1);
2720 } else if (!Op.compare_lower("ipas2le1is")) {
2721 // SYS #4, C8, C4, #5
2722 SYS_ALIAS(4, 8, 0, 5);
2723 } else if (!Op.compare_lower("vmalls12e1")) {
2724 // SYS #4, C8, C7, #6
2725 SYS_ALIAS(4, 8, 7, 6);
2726 } else if (!Op.compare_lower("vmalls12e1is")) {
2727 // SYS #4, C8, C3, #6
2728 SYS_ALIAS(4, 8, 3, 6);
2729 } else {
2730 //return TokError("invalid operand for TLBI instruction");
2731 return true;
2732 }
2733 }
2734
2735 #undef SYS_ALIAS
2736
2737 Parser.Lex(); // Eat operand.
2738
2739 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2740 bool HasRegister = false;
2741
2742 // Check for the optional register operand.
2743 if (getLexer().is(AsmToken::Comma)) {
2744 Parser.Lex(); // Eat comma.
2745
2746 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2747 //return TokError("expected register operand");
2748 return true;
2749
2750 HasRegister = true;
2751 }
2752
2753 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2754 Parser.eatToEndOfStatement();
2755 //return TokError("unexpected token in argument list");
2756 return true;
2757 }
2758
2759 if (ExpectRegister && !HasRegister) {
2760 //return TokError("specified " + Mnemonic + " op requires a register");
2761 return true;
2762 }
2763 else if (!ExpectRegister && HasRegister) {
2764 //return TokError("specified " + Mnemonic + " op does not use a register");
2765 return true;
2766 }
2767
2768 Parser.Lex(); // Consume the EndOfStatement
2769 return false;
2770 }
2771
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands)
{
  // Parse the operand of a barrier instruction (DMB/DSB/ISB): either an
  // immediate in [0, 15] (optionally '#'-prefixed) or a named option such
  // as "sy". Keystone note: the original LLVM diagnostics are commented
  // out; failure is signalled only via MatchOperand_ParseFail.
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // Can be either a #imm style literal or an option name
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    if (Hash)
      Parser.Lex(); // Eat the '#'
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    // The barrier operand must fold to a constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      //Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // The barrier option field is 4 bits wide.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      //Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    // Map the value back to a canonical name for printing; Valid may be
    // false for unnamed encodings, in which case Name is unused text.
    bool Valid;
    auto Mapper = AArch64DB::DBarrierMapper();
    StringRef Name =
        Mapper.toString(MCE->getValue(), getSTI().getFeatureBits(), Valid);
    Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
                                                      ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    //TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Named option: translate the identifier to its 4-bit encoding.
  bool Valid;
  auto Mapper = AArch64DB::DBarrierMapper();
  unsigned Opt =
      Mapper.fromString(Tok.getString(), getSTI().getFeatureBits(), Valid);
  if (!Valid) {
    //TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
    //TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
                                                    getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
2832
2833 AArch64AsmParser::OperandMatchResultTy
tryParseSysReg(OperandVector & Operands)2834 AArch64AsmParser::tryParseSysReg(OperandVector &Operands)
2835 {
2836 MCAsmParser &Parser = getParser();
2837 const AsmToken &Tok = Parser.getTok();
2838
2839 if (Tok.isNot(AsmToken::Identifier))
2840 return MatchOperand_NoMatch;
2841
2842 bool IsKnown;
2843 auto MRSMapper = AArch64SysReg::MRSMapper();
2844 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(),
2845 getSTI().getFeatureBits(), IsKnown);
2846 assert(IsKnown == (MRSReg != -1U) &&
2847 "register should be -1 if and only if it's unknown");
2848
2849 auto MSRMapper = AArch64SysReg::MSRMapper();
2850 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(),
2851 getSTI().getFeatureBits(), IsKnown);
2852 assert(IsKnown == (MSRReg != -1U) &&
2853 "register should be -1 if and only if it's unknown");
2854
2855 auto PStateMapper = AArch64PState::PStateMapper();
2856 uint32_t PStateField =
2857 PStateMapper.fromString(Tok.getString(),
2858 getSTI().getFeatureBits(), IsKnown);
2859 assert(IsKnown == (PStateField != -1U) &&
2860 "register should be -1 if and only if it's unknown");
2861
2862 Operands.push_back(AArch64Operand::CreateSysReg(
2863 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2864 Parser.Lex(); // Eat identifier
2865
2866 return MatchOperand_Success;
2867 }
2868
/// tryParseVectorRegister - Parse a vector register operand.
// return true on error
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands)
{
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      // NOTE(review): returns false (i.e. "no error") even though the index
      // expression failed to parse. This mirrors upstream LLVM, where
      // parseExpression has already emitted a diagnostic — confirm it is
      // still intended in Keystone, where diagnostics are suppressed.
      return false;
    // The vector index must be a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      //TokError("immediate value expected for vector index");
      return true;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      //Error(E, "']' expected");
      return true;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}
2919
2920 /// parseRegister - Parse a non-vector register operand.
2921 // return true on error
parseRegister(OperandVector & Operands)2922 bool AArch64AsmParser::parseRegister(OperandVector &Operands)
2923 {
2924 MCAsmParser &Parser = getParser();
2925 SMLoc S = getLoc();
2926 // Try for a vector register.
2927 if (!tryParseVectorRegister(Operands))
2928 return false;
2929
2930 // Try for a scalar register.
2931 int64_t Reg = tryParseRegister();
2932 if (Reg == -1)
2933 return true;
2934 Operands.push_back(
2935 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2936
2937 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2938 // as a string token in the instruction itself.
2939 if (getLexer().getKind() == AsmToken::LBrac) {
2940 SMLoc LBracS = getLoc();
2941 Parser.Lex();
2942 const AsmToken &Tok = Parser.getTok();
2943 if (Tok.is(AsmToken::Integer)) {
2944 SMLoc IntS = getLoc();
2945 bool valid;
2946 int64_t Val = Tok.getIntVal(valid);
2947 if (!valid)
2948 return MatchOperand_ParseFail;
2949 if (Val == 1) {
2950 Parser.Lex();
2951 if (getLexer().getKind() == AsmToken::RBrac) {
2952 SMLoc RBracS = getLoc();
2953 Parser.Lex();
2954 Operands.push_back(
2955 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2956 Operands.push_back(
2957 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2958 Operands.push_back(
2959 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2960 return false;
2961 }
2962 }
2963 }
2964 }
2965
2966 return false;
2967 }
2968
parseSymbolicImmVal(const MCExpr * & ImmVal)2969 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal)
2970 {
2971 MCAsmParser &Parser = getParser();
2972 bool HasELFModifier = false;
2973 AArch64MCExpr::VariantKind RefKind;
2974
2975 if (Parser.getTok().is(AsmToken::Colon)) {
2976 Parser.Lex(); // Eat ':"
2977 HasELFModifier = true;
2978
2979 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2980 //Error(Parser.getTok().getLoc(),
2981 // "expect relocation specifier in operand after ':'");
2982 return true;
2983 }
2984
2985 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2986 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2987 .Case("lo12", AArch64MCExpr::VK_LO12)
2988 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2989 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2990 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2991 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2992 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2993 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2994 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2995 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2996 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2997 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2998 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2999 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3000 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3001 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3002 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3003 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3004 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3005 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3006 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3007 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3008 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3009 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3010 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3011 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3012 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3013 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3014 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3015 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3016 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3017 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3018 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3019 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3020 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3021 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3022 .Default(AArch64MCExpr::VK_INVALID);
3023
3024 if (RefKind == AArch64MCExpr::VK_INVALID) {
3025 //Error(Parser.getTok().getLoc(),
3026 // "expect relocation specifier in operand after ':'");
3027 return true;
3028 }
3029
3030 Parser.Lex(); // Eat identifier
3031
3032 if (Parser.getTok().isNot(AsmToken::Colon)) {
3033 //Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3034 return true;
3035 }
3036 Parser.Lex(); // Eat ':'
3037 }
3038
3039 if (getParser().parseExpression(ImmVal))
3040 return true;
3041
3042 if (HasELFModifier)
3043 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
3044
3045 return false;
3046 }
3047
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions,
/// e.g. "{ v0.8b, v1.8b }" or the range form "{ v0.8b - v3.8b }", with an
/// optional trailing lane index "[n]".
// return true on error
bool AArch64AsmParser::parseVectorList(OperandVector &Operands)
{
  MCAsmParser &Parser = getParser();
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (Parser.getTok().is(AsmToken::Minus)) {
    // Range form: "{ vA - vB }".
    Parser.Lex(); // Eat the minus.

    //SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      //return Error(Loc, "mismatched register size suffix");
      return true;

    // Register numbers wrap modulo 32 (e.g. "{ v31 - v1 }" is 3 regs).
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      //return Error(Loc, "invalid number of vectors");
      return true;
    }

    Count += Space;
  }
  else {
    // Comma-separated form: "{ vA, vB, ... }".
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      //SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        //return Error(Loc, "mismatched register size suffix");
        return true;

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
        //return Error(Loc, "registers must be sequential");
        return true;

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    //return Error(getLoc(), "'}' expected");
    return true;
  Parser.Lex(); // Eat the '}' token.

  // Lists are limited to at most 4 registers.
  if (Count > 4)
    //return Error(S, "invalid number of vectors");
    return true;

  unsigned NumElements = 0;
  char ElementKind = 0;
  // Decode the ".8b"/".4h"/... suffix into element count and kind.
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      // NOTE(review): the three failure paths below return false ("no
      // error") rather than true; upstream LLVM emitted a diagnostic here.
      // Confirm this soft-failure behavior is intended in Keystone.
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      //TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      //Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}
3154
3155 AArch64AsmParser::OperandMatchResultTy
tryParseGPR64sp0Operand(OperandVector & Operands)3156 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands)
3157 {
3158 MCAsmParser &Parser = getParser();
3159 const AsmToken &Tok = Parser.getTok();
3160 if (!Tok.is(AsmToken::Identifier))
3161 return MatchOperand_NoMatch;
3162
3163 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3164
3165 MCContext &Ctx = getContext();
3166 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3167 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3168 return MatchOperand_NoMatch;
3169
3170 SMLoc S = getLoc();
3171 Parser.Lex(); // Eat register
3172
3173 if (Parser.getTok().isNot(AsmToken::Comma)) {
3174 Operands.push_back(
3175 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3176 return MatchOperand_Success;
3177 }
3178 Parser.Lex(); // Eat comma.
3179
3180 if (Parser.getTok().is(AsmToken::Hash))
3181 Parser.Lex(); // Eat hash
3182
3183 if (Parser.getTok().isNot(AsmToken::Integer)) {
3184 //Error(getLoc(), "index must be absent or #0");
3185 return MatchOperand_ParseFail;
3186 }
3187
3188 const MCExpr *ImmVal;
3189 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3190 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3191 //Error(getLoc(), "index must be absent or #0");
3192 return MatchOperand_ParseFail;
3193 }
3194
3195 Operands.push_back(
3196 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3197 return MatchOperand_Success;
3198 }
3199
3200 /// parseOperand - Parse a arm instruction operand. For now this parses the
3201 /// operand regardless of the mnemonic.
3202 // return true on failure
parseOperand(OperandVector & Operands,bool isCondCode,bool invertCondCode)3203 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3204 bool invertCondCode)
3205 {
3206 MCAsmParser &Parser = getParser();
3207 // Check if the current operand has a custom associated parser, if so, try to
3208 // custom parse the operand, or fallback to the general approach.
3209 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3210 if (ResTy == MatchOperand_Success)
3211 return false;
3212 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3213 // there was a match, but an error occurred, in which case, just return that
3214 // the operand parsing failed.
3215 if (ResTy == MatchOperand_ParseFail)
3216 return true;
3217
3218 // Nothing custom, so do general case parsing.
3219 SMLoc S, E;
3220 switch (getLexer().getKind()) {
3221 default: {
3222 SMLoc S = getLoc();
3223 const MCExpr *Expr;
3224 if (parseSymbolicImmVal(Expr))
3225 //return Error(S, "invalid operand");
3226 return true;
3227
3228 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3229 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3230 return false;
3231 }
3232 case AsmToken::LBrac: {
3233 SMLoc Loc = Parser.getTok().getLoc();
3234 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3235 getContext()));
3236 Parser.Lex(); // Eat '['
3237
3238 // There's no comma after a '[', so we can parse the next operand
3239 // immediately.
3240 return parseOperand(Operands, false, false);
3241 }
3242 case AsmToken::LCurly:
3243 return parseVectorList(Operands);
3244 case AsmToken::Identifier: {
3245 // If we're expecting a Condition Code operand, then just parse that.
3246 if (isCondCode)
3247 return parseCondCode(Operands, invertCondCode);
3248
3249 // If it's a register name, parse it.
3250 if (!parseRegister(Operands))
3251 return false;
3252
3253 // This could be an optional "shift" or "extend" operand.
3254 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3255 // We can only continue if no tokens were eaten.
3256 if (GotShift != MatchOperand_NoMatch)
3257 return GotShift;
3258
3259 // This was not a register so parse other operands that start with an
3260 // identifier (like labels) as expressions and create them as immediates.
3261 const MCExpr *IdVal;
3262 S = getLoc();
3263 if (getParser().parseExpression(IdVal))
3264 return true;
3265
3266 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3267 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3268 return false;
3269 }
3270 case AsmToken::Integer:
3271 case AsmToken::Real:
3272 case AsmToken::Hash: {
3273 // #42 -> immediate.
3274 S = getLoc();
3275 if (getLexer().is(AsmToken::Hash))
3276 Parser.Lex();
3277
3278 // Parse a negative sign
3279 bool isNegative = false;
3280 if (Parser.getTok().is(AsmToken::Minus)) {
3281 isNegative = true;
3282 // We need to consume this token only when we have a Real, otherwise
3283 // we let parseSymbolicImmVal take care of it
3284 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3285 Parser.Lex();
3286 }
3287
3288 // The only Real that should come through here is a literal #0.0 for
3289 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3290 // so convert the value.
3291 const AsmToken &Tok = Parser.getTok();
3292 if (Tok.is(AsmToken::Real)) {
3293 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3294 if (RealVal.bitcastToAPInt().getActiveBits() > 64)
3295 return true;
3296 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3297 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3298 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3299 Mnemonic != "fcmlt")
3300 //return TokError("unexpected floating point literal");
3301 return true;
3302 else if (IntVal != 0 || isNegative)
3303 //return TokError("expected floating-point constant #0.0");
3304 return true;
3305 Parser.Lex(); // Eat the token.
3306
3307 Operands.push_back(
3308 AArch64Operand::CreateToken("#0", false, S, getContext()));
3309 Operands.push_back(
3310 AArch64Operand::CreateToken(".0", false, S, getContext()));
3311 return false;
3312 }
3313
3314 const MCExpr *ImmVal;
3315 if (parseSymbolicImmVal(ImmVal))
3316 return true;
3317
3318 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3319 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3320 return false;
3321 }
3322 case AsmToken::Equal: {
3323 SMLoc Loc = Parser.getTok().getLoc();
3324 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3325 //return Error(Loc, "unexpected token in operand");
3326 return true;
3327 Parser.Lex(); // Eat '='
3328 const MCExpr *SubExprVal;
3329 if (getParser().parseExpression(SubExprVal))
3330 return true;
3331
3332 if (Operands.size() < 2 ||
3333 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3334 //return Error(Loc, "Only valid when first operand is register");
3335 return true;
3336
3337 bool IsXReg =
3338 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3339 Operands[1]->getReg());
3340
3341 MCContext& Ctx = getContext();
3342 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3343 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3344 if (isa<MCConstantExpr>(SubExprVal)) {
3345 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3346 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3347 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3348 ShiftAmt += 16;
3349 Imm >>= 16;
3350 }
3351 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3352 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3353 Operands.push_back(AArch64Operand::CreateImm(
3354 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3355 if (ShiftAmt)
3356 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3357 ShiftAmt, true, S, E, Ctx));
3358 return false;
3359 }
3360 APInt Simm = APInt(64, Imm << ShiftAmt);
3361 // check if the immediate is an unsigned or signed 32-bit int for W regs
3362 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3363 //return Error(Loc, "Immediate too large for register");
3364 return true;
3365 }
3366 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3367 const MCExpr *CPLoc =
3368 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3369 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3370 return false;
3371 }
3372 }
3373 }
3374
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. ErrorCode receives a Keystone KS_ERR_ASM_* code on some (not
/// all) failure paths.
// return true on error
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands, unsigned int &ErrorCode)
{
  MCAsmParser &Parser = getParser();
  // Canonicalize shorthand conditional branches ("beq") into the dotted
  // form ("b.eq") the matcher expects.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    // NOTE(review): ErrorCode is not set on this failure path — confirm
    // callers tolerate its previous value here.
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    // The condition code is the text between the first two dots.
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      //return Error(SuffixLoc, "invalid condition code");
      return true;
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
      return true;
    }

    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand. Which positional operand is a
      // condition code depends on the mnemonic (see the flags above).
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    //SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    //return Error(Loc, "unexpected token in argument list");
    // NOTE(review): ErrorCode is not set on this failure path either.
    return true;
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
3534
3535 // FIXME: This entire function is a giant hack to provide us with decent
3536 // operand range validation/diagnostics until TableGen/MC can be extended
3537 // to support autogeneration of this kind of validation.
// Perform semantic (range / register-constraint) validation that the
// TableGen-generated matcher cannot express.  Returns true to REJECT the
// instruction (Keystone convention: diagnostics suppressed, boolean only).
//
// Two independent passes:
//   1. Writeback-hazard checks: pre/post-indexed loads/stores where the
//      base register overlaps a transfer register, and LDP with Rt == Rt2.
//      The ARMv8 ARM classifies these as UNPREDICTABLE/CONSTRAINED.
//   2. Immediate-expression checks for ADD/SUB aliases.
// Note the operand-index convention: writeback (pre/post) forms carry the
// updated base as operand 0, so Rt/Rt2/Rn sit at indices 1/2/3, whereas
// the plain indexed forms start at index 0.
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                           SmallVectorImpl<SMLoc> &Loc)
{
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback LDP: base must not overlap either destination.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      //return Error(Loc[0], "unpredictable LDP instruction, writeback base "
      //             "is also a destination");
      return true;
    if (RI->isSubRegisterEq(Rn, Rt2))
      //return Error(Loc[1], "unpredictable LDP instruction, writeback base "
      //             "is also a destination");
      return true;
    // FALLTHROUGH
    // NOTE(review): intentional fallthrough into the Rt==Rt2 check below,
    // which reads operands 0/1 — for these writeback forms those are the
    // writeback base and Rt, not Rt/Rt2.  Inherited from upstream LLVM;
    // confirm before "fixing".
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback LDP: loading the same register twice is unpredictable.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      //return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
      return true;
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // Writeback LDP (FP/remaining variants): Rt == Rt2 is unpredictable.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      //return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
      return true;
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    // Writeback STP: base must not overlap either source.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      //return Error(Loc[0], "unpredictable STP instruction, writeback base "
      //             "is also a source");
      return true;
    if (RI->isSubRegisterEq(Rn, Rt2))
      //return Error(Loc[1], "unpredictable STP instruction, writeback base "
      //             "is also a source");
      return true;
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Writeback single-register load: base must not overlap destination.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      //return Error(Loc[0], "unpredictable LDR instruction, writeback base "
      //             "is also a source");
      return true;
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Writeback single-register store: base must not overlap source.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      //return Error(Loc[0], "unpredictable STR instruction, writeback base "
      //             "is also a source");
      return true;
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
        //return Error(Loc[2], "invalid immediate expression");
        return true;
      }

      // Only allow these with ADDXri.
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
          Inst.getOpcode() == AArch64::ADDXri)
        return false;

      // Only allow these with ADDXri/ADDWri
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
          (Inst.getOpcode() == AArch64::ADDXri ||
           Inst.getOpcode() == AArch64::ADDWri))
        return false;

      // Don't allow expressions in the immediate field otherwise
      //return Error(Loc[2], "invalid immediate expression");
      return true;
    }
    return false;
  }
  default:
    return false;
  }
}
3719
showMatchError(SMLoc Loc,unsigned ErrCode)3720 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode)
3721 {
3722 switch (ErrCode) {
3723 case Match_MissingFeature:
3724 //return Error(Loc,
3725 // "instruction requires a CPU feature not currently enabled");
3726 return true;
3727 case Match_InvalidOperand:
3728 //return Error(Loc, "invalid operand for instruction");
3729 return true;
3730 case Match_InvalidSuffix:
3731 //return Error(Loc, "invalid type suffix for instruction");
3732 return true;
3733 case Match_InvalidCondCode:
3734 //return Error(Loc, "expected AArch64 condition code");
3735 return true;
3736 case Match_AddSubRegExtendSmall:
3737 //return Error(Loc,
3738 // "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3739 return true;
3740 case Match_AddSubRegExtendLarge:
3741 //return Error(Loc,
3742 // "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3743 return true;
3744 case Match_AddSubSecondSource:
3745 //return Error(Loc,
3746 // "expected compatible register, symbol or integer in range [0, 4095]");
3747 return true;
3748 case Match_LogicalSecondSource:
3749 //return Error(Loc, "expected compatible register or logical immediate");
3750 return true;
3751 case Match_InvalidMovImm32Shift:
3752 //return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3753 return true;
3754 case Match_InvalidMovImm64Shift:
3755 //return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3756 return true;
3757 case Match_AddSubRegShift32:
3758 //return Error(Loc,
3759 // "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3760 return true;
3761 case Match_AddSubRegShift64:
3762 //return Error(Loc,
3763 // "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3764 return true;
3765 case Match_InvalidFPImm:
3766 //return Error(Loc,
3767 // "expected compatible register or floating-point constant");
3768 return true;
3769 case Match_InvalidMemoryIndexedSImm9:
3770 //return Error(Loc, "index must be an integer in range [-256, 255].");
3771 return true;
3772 case Match_InvalidMemoryIndexed4SImm7:
3773 //return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3774 return true;
3775 case Match_InvalidMemoryIndexed8SImm7:
3776 //return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3777 return true;
3778 case Match_InvalidMemoryIndexed16SImm7:
3779 //return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3780 return true;
3781 case Match_InvalidMemoryWExtend8:
3782 //return Error(Loc,
3783 // "expected 'uxtw' or 'sxtw' with optional shift of #0");
3784 return true;
3785 case Match_InvalidMemoryWExtend16:
3786 //return Error(Loc,
3787 // "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3788 return true;
3789 case Match_InvalidMemoryWExtend32:
3790 //return Error(Loc,
3791 // "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3792 return true;
3793 case Match_InvalidMemoryWExtend64:
3794 //return Error(Loc,
3795 // "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3796 return true;
3797 case Match_InvalidMemoryWExtend128:
3798 //return Error(Loc,
3799 // "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3800 return true;
3801 case Match_InvalidMemoryXExtend8:
3802 //return Error(Loc,
3803 // "expected 'lsl' or 'sxtx' with optional shift of #0");
3804 return true;
3805 case Match_InvalidMemoryXExtend16:
3806 //return Error(Loc,
3807 // "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3808 return true;
3809 case Match_InvalidMemoryXExtend32:
3810 //return Error(Loc,
3811 // "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3812 return true;
3813 case Match_InvalidMemoryXExtend64:
3814 //return Error(Loc,
3815 // "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3816 return true;
3817 case Match_InvalidMemoryXExtend128:
3818 //return Error(Loc,
3819 // "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3820 return true;
3821 case Match_InvalidMemoryIndexed1:
3822 //return Error(Loc, "index must be an integer in range [0, 4095].");
3823 return true;
3824 case Match_InvalidMemoryIndexed2:
3825 //return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3826 return true;
3827 case Match_InvalidMemoryIndexed4:
3828 //return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3829 return true;
3830 case Match_InvalidMemoryIndexed8:
3831 //return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3832 return true;
3833 case Match_InvalidMemoryIndexed16:
3834 //return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3835 return true;
3836 case Match_InvalidImm0_1:
3837 //return Error(Loc, "immediate must be an integer in range [0, 1].");
3838 return true;
3839 case Match_InvalidImm0_7:
3840 //return Error(Loc, "immediate must be an integer in range [0, 7].");
3841 return true;
3842 case Match_InvalidImm0_15:
3843 //return Error(Loc, "immediate must be an integer in range [0, 15].");
3844 return true;
3845 case Match_InvalidImm0_31:
3846 //return Error(Loc, "immediate must be an integer in range [0, 31].");
3847 return true;
3848 case Match_InvalidImm0_63:
3849 //return Error(Loc, "immediate must be an integer in range [0, 63].");
3850 return true;
3851 case Match_InvalidImm0_127:
3852 //return Error(Loc, "immediate must be an integer in range [0, 127].");
3853 return true;
3854 case Match_InvalidImm0_65535:
3855 //return Error(Loc, "immediate must be an integer in range [0, 65535].");
3856 return true;
3857 case Match_InvalidImm1_8:
3858 //return Error(Loc, "immediate must be an integer in range [1, 8].");
3859 return true;
3860 case Match_InvalidImm1_16:
3861 //return Error(Loc, "immediate must be an integer in range [1, 16].");
3862 return true;
3863 case Match_InvalidImm1_32:
3864 //return Error(Loc, "immediate must be an integer in range [1, 32].");
3865 return true;
3866 case Match_InvalidImm1_64:
3867 //return Error(Loc, "immediate must be an integer in range [1, 64].");
3868 return true;
3869 case Match_InvalidIndex1:
3870 //return Error(Loc, "expected lane specifier '[1]'");
3871 return true;
3872 case Match_InvalidIndexB:
3873 //return Error(Loc, "vector lane must be an integer in range [0, 15].");
3874 return true;
3875 case Match_InvalidIndexH:
3876 //return Error(Loc, "vector lane must be an integer in range [0, 7].");
3877 return true;
3878 case Match_InvalidIndexS:
3879 //return Error(Loc, "vector lane must be an integer in range [0, 3].");
3880 return true;
3881 case Match_InvalidIndexD:
3882 //return Error(Loc, "vector lane must be an integer in range [0, 1].");
3883 return true;
3884 case Match_InvalidLabel:
3885 //return Error(Loc, "expected label or encodable integer pc offset");
3886 return true;
3887 case Match_MRS:
3888 //return Error(Loc, "expected readable system register");
3889 return true;
3890 case Match_MSR:
3891 //return Error(Loc, "expected writable system register or pstate");
3892 return true;
3893 case Match_MnemonicFail:
3894 //return Error(Loc, "unrecognized instruction mnemonic");
3895 return true;
3896 default:
3897 llvm_unreachable("unexpected error code!");
3898 }
3899 }
3900
3901 static const char *getSubtargetFeatureName(uint64_t Val);
3902
3903 // return True on error
MatchAndEmitInstruction(SMLoc IDLoc,unsigned & Opcode,OperandVector & Operands,MCStreamer & Out,uint64_t & ErrorInfo,bool MatchingInlineAsm,unsigned int & ErrorCode,uint64_t & Address)3904 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3905 OperandVector &Operands,
3906 MCStreamer &Out,
3907 uint64_t &ErrorInfo,
3908 bool MatchingInlineAsm, unsigned int &ErrorCode, uint64_t &Address)
3909 {
3910 assert(!Operands.empty() && "Unexpect empty operand list!");
3911 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3912 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3913
3914 StringRef Tok = Op.getToken();
3915 unsigned NumOperands = Operands.size();
3916
3917 if (NumOperands == 4 && Tok == "lsl") {
3918 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3919 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3920 if (Op2.isReg() && Op3.isImm()) {
3921 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3922 if (Op3CE) {
3923 uint64_t Op3Val = Op3CE->getValue();
3924 uint64_t NewOp3Val = 0;
3925 uint64_t NewOp4Val = 0;
3926 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3927 Op2.getReg())) {
3928 NewOp3Val = (32 - Op3Val) & 0x1f;
3929 NewOp4Val = 31 - Op3Val;
3930 } else {
3931 NewOp3Val = (64 - Op3Val) & 0x3f;
3932 NewOp4Val = 63 - Op3Val;
3933 }
3934
3935 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3936 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3937
3938 Operands[0] = AArch64Operand::CreateToken(
3939 "ubfm", false, Op.getStartLoc(), getContext());
3940 Operands.push_back(AArch64Operand::CreateImm(
3941 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3942 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3943 Op3.getEndLoc(), getContext());
3944 }
3945 }
3946 } else if (NumOperands == 4 && Tok == "bfc") {
3947 // FIXME: Horrible hack to handle BFC->BFM alias.
3948 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3949 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3950 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3951
3952 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3953 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3954 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3955
3956 if (LSBCE && WidthCE) {
3957 uint64_t LSB = LSBCE->getValue();
3958 uint64_t Width = WidthCE->getValue();
3959
3960 uint64_t RegWidth = 0;
3961 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3962 Op1.getReg()))
3963 RegWidth = 64;
3964 else
3965 RegWidth = 32;
3966
3967 if (LSB >= RegWidth) {
3968 //return Error(LSBOp.getStartLoc(),
3969 // "expected integer in range [0, 31]");
3970 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3971 return true;
3972 }
3973 if (Width < 1 || Width > RegWidth) {
3974 //return Error(WidthOp.getStartLoc(),
3975 // "expected integer in range [1, 32]");
3976 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3977 return true;
3978 }
3979
3980 uint64_t ImmR = 0;
3981 if (RegWidth == 32)
3982 ImmR = (32 - LSB) & 0x1f;
3983 else
3984 ImmR = (64 - LSB) & 0x3f;
3985
3986 uint64_t ImmS = Width - 1;
3987
3988 if (ImmR != 0 && ImmS >= ImmR) {
3989 //return Error(WidthOp.getStartLoc(),
3990 // "requested insert overflows register");
3991 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
3992 return true;
3993 }
3994
3995 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3996 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3997 Operands[0] = AArch64Operand::CreateToken(
3998 "bfm", false, Op.getStartLoc(), getContext());
3999 Operands[2] = AArch64Operand::CreateReg(
4000 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
4001 SMLoc(), getContext());
4002 Operands[3] = AArch64Operand::CreateImm(
4003 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4004 Operands.emplace_back(
4005 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4006 WidthOp.getEndLoc(), getContext()));
4007 }
4008 }
4009 } else if (NumOperands == 5) {
4010 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4011 // UBFIZ -> UBFM aliases.
4012 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4013 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4014 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4015 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4016
4017 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
4018 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4019 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4020
4021 if (Op3CE && Op4CE) {
4022 uint64_t Op3Val = Op3CE->getValue();
4023 uint64_t Op4Val = Op4CE->getValue();
4024
4025 uint64_t RegWidth = 0;
4026 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4027 Op1.getReg()))
4028 RegWidth = 64;
4029 else
4030 RegWidth = 32;
4031
4032 if (Op3Val >= RegWidth) {
4033 //return Error(Op3.getStartLoc(),
4034 // "expected integer in range [0, 31]");
4035 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4036 return true;
4037 }
4038 if (Op4Val < 1 || Op4Val > RegWidth) {
4039 //return Error(Op4.getStartLoc(),
4040 // "expected integer in range [1, 32]");
4041 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4042 return true;
4043 }
4044
4045 uint64_t NewOp3Val = 0;
4046 if (RegWidth == 32)
4047 NewOp3Val = (32 - Op3Val) & 0x1f;
4048 else
4049 NewOp3Val = (64 - Op3Val) & 0x3f;
4050
4051 uint64_t NewOp4Val = Op4Val - 1;
4052
4053 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val) {
4054 //return Error(Op4.getStartLoc(),
4055 // "requested insert overflows register");
4056 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4057 return true;
4058 }
4059
4060 const MCExpr *NewOp3 =
4061 MCConstantExpr::create(NewOp3Val, getContext());
4062 const MCExpr *NewOp4 =
4063 MCConstantExpr::create(NewOp4Val, getContext());
4064 Operands[3] = AArch64Operand::CreateImm(
4065 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4066 Operands[4] = AArch64Operand::CreateImm(
4067 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4068 if (Tok == "bfi")
4069 Operands[0] = AArch64Operand::CreateToken(
4070 "bfm", false, Op.getStartLoc(), getContext());
4071 else if (Tok == "sbfiz")
4072 Operands[0] = AArch64Operand::CreateToken(
4073 "sbfm", false, Op.getStartLoc(), getContext());
4074 else if (Tok == "ubfiz")
4075 Operands[0] = AArch64Operand::CreateToken(
4076 "ubfm", false, Op.getStartLoc(), getContext());
4077 else
4078 llvm_unreachable("No valid mnemonic for alias?");
4079 }
4080 }
4081
4082 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4083 // UBFX -> UBFM aliases.
4084 } else if (NumOperands == 5 &&
4085 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4086 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4087 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4088 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4089
4090 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
4091 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4092 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4093
4094 if (Op3CE && Op4CE) {
4095 uint64_t Op3Val = Op3CE->getValue();
4096 uint64_t Op4Val = Op4CE->getValue();
4097
4098 uint64_t RegWidth = 0;
4099 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4100 Op1.getReg()))
4101 RegWidth = 64;
4102 else
4103 RegWidth = 32;
4104
4105 if (Op3Val >= RegWidth) {
4106 // TODO: save ErrorCode
4107 //return Error(Op3.getStartLoc(),
4108 // "expected integer in range [0, 31]");
4109 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4110 return true;
4111 }
4112 if (Op4Val < 1 || Op4Val > RegWidth) {
4113 // TODO: save ErrorCode
4114 //return Error(Op4.getStartLoc(),
4115 // "expected integer in range [1, 32]");
4116 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4117 return true;
4118 }
4119
4120 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4121
4122 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val) {
4123 // TODO: save ErrorCode
4124 //return Error(Op4.getStartLoc(),
4125 // "requested extract overflows register");
4126 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4127 return true;
4128 }
4129
4130 const MCExpr *NewOp4 =
4131 MCConstantExpr::create(NewOp4Val, getContext());
4132 Operands[4] = AArch64Operand::CreateImm(
4133 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
4134 if (Tok == "bfxil")
4135 Operands[0] = AArch64Operand::CreateToken(
4136 "bfm", false, Op.getStartLoc(), getContext());
4137 else if (Tok == "sbfx")
4138 Operands[0] = AArch64Operand::CreateToken(
4139 "sbfm", false, Op.getStartLoc(), getContext());
4140 else if (Tok == "ubfx")
4141 Operands[0] = AArch64Operand::CreateToken(
4142 "ubfm", false, Op.getStartLoc(), getContext());
4143 else
4144 llvm_unreachable("No valid mnemonic for alias?");
4145 }
4146 }
4147 }
4148 }
4149 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4150 // InstAlias can't quite handle this since the reg classes aren't
4151 // subclasses.
4152 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4153 // The source register can be Wn here, but the matcher expects a
4154 // GPR64. Twiddle it here if necessary.
4155 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4156 if (Op.isReg()) {
4157 unsigned Reg = getXRegFromWReg(Op.getReg());
4158 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
4159 Op.getEndLoc(), getContext());
4160 }
4161 }
4162 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4163 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4164 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4165 if (Op.isReg() &&
4166 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4167 Op.getReg())) {
4168 // The source register can be Wn here, but the matcher expects a
4169 // GPR64. Twiddle it here if necessary.
4170 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4171 if (Op.isReg()) {
4172 unsigned Reg = getXRegFromWReg(Op.getReg());
4173 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
4174 Op.getEndLoc(), getContext());
4175 }
4176 }
4177 }
4178 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4179 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4180 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4181 if (Op.isReg() &&
4182 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4183 Op.getReg())) {
4184 // The source register can be Wn here, but the matcher expects a
4185 // GPR32. Twiddle it here if necessary.
4186 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4187 if (Op.isReg()) {
4188 unsigned Reg = getWRegFromXReg(Op.getReg());
4189 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
4190 Op.getEndLoc(), getContext());
4191 }
4192 }
4193 }
4194
4195 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4196 if (NumOperands == 3 && Tok == "fmov") {
4197 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
4198 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
4199 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
4200 unsigned zreg =
4201 !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
4202 RegOp.getReg())
4203 ? AArch64::WZR
4204 : AArch64::XZR;
4205 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
4206 Op.getEndLoc(), getContext());
4207 }
4208 }
4209
4210 MCInst Inst(Address);
4211 // First try to match against the secondary set of tables containing the
4212 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4213 unsigned MatchResult =
4214 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4215
4216 // If that fails, try against the alternate table containing long-form NEON:
4217 // "fadd v0.2s, v1.2s, v2.2s"
4218 if (MatchResult != Match_Success) {
4219 // But first, save the short-form match result: we can use it in case the
4220 // long-form match also fails.
4221 auto ShortFormNEONErrorInfo = ErrorInfo;
4222 auto ShortFormNEONMatchResult = MatchResult;
4223
4224 MatchResult =
4225 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4226
4227 // Now, both matches failed, and the long-form match failed on the mnemonic
4228 // suffix token operand. The short-form match failure is probably more
4229 // relevant: use it instead.
4230 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4231 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4232 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4233 MatchResult = ShortFormNEONMatchResult;
4234 ErrorInfo = ShortFormNEONErrorInfo;
4235 }
4236 }
4237
4238 // save the error code
4239 ErrorCode = MatchResult;
4240
4241 switch (MatchResult) {
4242 case Match_Success: {
4243 // Perform range checking and other semantic validations
4244 SmallVector<SMLoc, 8> OperandLocs;
4245 NumOperands = Operands.size();
4246 for (unsigned i = 1; i < NumOperands; ++i)
4247 OperandLocs.push_back(Operands[i]->getStartLoc());
4248 if (validateInstruction(Inst, OperandLocs))
4249 return true;
4250
4251 Inst.setLoc(IDLoc);
4252 Out.EmitInstruction(Inst, getSTI(), ErrorCode);
4253 if (ErrorCode == 0) {
4254 Address = Inst.getAddress(); // Keystone update address
4255 return false;
4256 } else
4257 return true;
4258 }
4259 case Match_MissingFeature: {
4260 assert(ErrorInfo && "Unknown missing feature!");
4261 // Special case the error message for the very common case where only
4262 // a single subtarget feature is missing (neon, e.g.).
4263 std::string Msg = "instruction requires:";
4264 uint64_t Mask = 1;
4265 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4266 if (ErrorInfo & Mask) {
4267 Msg += " ";
4268 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4269 }
4270 Mask <<= 1;
4271 }
4272 //return Error(IDLoc, Msg);
4273 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4274 return true;
4275 }
4276 case Match_MnemonicFail:
4277 return showMatchError(IDLoc, MatchResult);
4278 case Match_InvalidOperand: {
4279 SMLoc ErrorLoc = IDLoc;
4280
4281 if (ErrorInfo != ~0ULL) {
4282 if (ErrorInfo >= Operands.size()) {
4283 //return Error(IDLoc, "too few operands for instruction");
4284 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4285 return true;
4286 }
4287
4288 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4289 if (ErrorLoc == SMLoc())
4290 ErrorLoc = IDLoc;
4291 }
4292 // If the match failed on a suffix token operand, tweak the diagnostic
4293 // accordingly.
4294 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4295 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4296 MatchResult = Match_InvalidSuffix;
4297
4298 return showMatchError(ErrorLoc, MatchResult);
4299 }
4300 case Match_InvalidMemoryIndexed1:
4301 case Match_InvalidMemoryIndexed2:
4302 case Match_InvalidMemoryIndexed4:
4303 case Match_InvalidMemoryIndexed8:
4304 case Match_InvalidMemoryIndexed16:
4305 case Match_InvalidCondCode:
4306 case Match_AddSubRegExtendSmall:
4307 case Match_AddSubRegExtendLarge:
4308 case Match_AddSubSecondSource:
4309 case Match_LogicalSecondSource:
4310 case Match_AddSubRegShift32:
4311 case Match_AddSubRegShift64:
4312 case Match_InvalidMovImm32Shift:
4313 case Match_InvalidMovImm64Shift:
4314 case Match_InvalidFPImm:
4315 case Match_InvalidMemoryWExtend8:
4316 case Match_InvalidMemoryWExtend16:
4317 case Match_InvalidMemoryWExtend32:
4318 case Match_InvalidMemoryWExtend64:
4319 case Match_InvalidMemoryWExtend128:
4320 case Match_InvalidMemoryXExtend8:
4321 case Match_InvalidMemoryXExtend16:
4322 case Match_InvalidMemoryXExtend32:
4323 case Match_InvalidMemoryXExtend64:
4324 case Match_InvalidMemoryXExtend128:
4325 case Match_InvalidMemoryIndexed4SImm7:
4326 case Match_InvalidMemoryIndexed8SImm7:
4327 case Match_InvalidMemoryIndexed16SImm7:
4328 case Match_InvalidMemoryIndexedSImm9:
4329 case Match_InvalidImm0_1:
4330 case Match_InvalidImm0_7:
4331 case Match_InvalidImm0_15:
4332 case Match_InvalidImm0_31:
4333 case Match_InvalidImm0_63:
4334 case Match_InvalidImm0_127:
4335 case Match_InvalidImm0_65535:
4336 case Match_InvalidImm1_8:
4337 case Match_InvalidImm1_16:
4338 case Match_InvalidImm1_32:
4339 case Match_InvalidImm1_64:
4340 case Match_InvalidIndex1:
4341 case Match_InvalidIndexB:
4342 case Match_InvalidIndexH:
4343 case Match_InvalidIndexS:
4344 case Match_InvalidIndexD:
4345 case Match_InvalidLabel:
4346 case Match_MSR:
4347 case Match_MRS: {
4348 if (ErrorInfo >= Operands.size()) {
4349 //return Error(IDLoc, "too few operands for instruction");
4350 ErrorCode = KS_ERR_ASM_INVALIDOPERAND;
4351 return true;
4352 }
4353 // Any time we get here, there's nothing fancy to do. Just get the
4354 // operand SMLoc and display the diagnostic.
4355 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4356 if (ErrorLoc == SMLoc())
4357 ErrorLoc = IDLoc;
4358 return showMatchError(ErrorLoc, MatchResult);
4359 }
4360 }
4361
4362 llvm_unreachable("Implement any new match types added!");
4363 }
4364
4365 /// ParseDirective parses the arm specific directives
ParseDirective(AsmToken DirectiveID)4366 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4367 const MCObjectFileInfo::Environment Format =
4368 getContext().getObjectFileInfo()->getObjectFileType();
4369 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4370 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4371
4372 StringRef IDVal = DirectiveID.getIdentifier();
4373 SMLoc Loc = DirectiveID.getLoc();
4374 if (IDVal == ".hword")
4375 return parseDirectiveWord(2, Loc);
4376 if (IDVal == ".word")
4377 return parseDirectiveWord(4, Loc);
4378 if (IDVal == ".xword")
4379 return parseDirectiveWord(8, Loc);
4380 if (IDVal == ".tlsdesccall")
4381 return parseDirectiveTLSDescCall(Loc);
4382 if (IDVal == ".ltorg" || IDVal == ".pool")
4383 return parseDirectiveLtorg(Loc);
4384 if (IDVal == ".unreq")
4385 return parseDirectiveUnreq(Loc);
4386
4387 if (!IsMachO && !IsCOFF) {
4388 if (IDVal == ".inst")
4389 return parseDirectiveInst(Loc);
4390 }
4391
4392 return parseDirectiveLOH(IDVal, Loc);
4393 }
4394
4395 /// parseDirectiveWord
4396 /// ::= .word [ expression (, expression)* ]
parseDirectiveWord(unsigned Size,SMLoc L)4397 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L)
4398 {
4399 MCAsmParser &Parser = getParser();
4400 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4401 for (;;) {
4402 const MCExpr *Value;
4403 if (getParser().parseExpression(Value))
4404 return true;
4405
4406 getParser().getStreamer().EmitValue(Value, Size, L);
4407
4408 if (getLexer().is(AsmToken::EndOfStatement))
4409 break;
4410
4411 // FIXME: Improve diagnostic.
4412 if (getLexer().isNot(AsmToken::Comma))
4413 //return Error(L, "unexpected token in directive");
4414 return true;
4415 Parser.Lex();
4416 }
4417 }
4418
4419 Parser.Lex();
4420 return false;
4421 }
4422
4423 /// parseDirectiveInst
4424 /// ::= .inst opcode [, ...]
parseDirectiveInst(SMLoc Loc)4425 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc)
4426 {
4427 MCAsmParser &Parser = getParser();
4428 if (getLexer().is(AsmToken::EndOfStatement)) {
4429 Parser.eatToEndOfStatement();
4430 //Error(Loc, "expected expression following directive");
4431 return false;
4432 }
4433
4434 for (;;) {
4435 const MCExpr *Expr;
4436
4437 if (getParser().parseExpression(Expr)) {
4438 //Error(Loc, "expected expression");
4439 return false;
4440 }
4441
4442 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4443 if (!Value) {
4444 //Error(Loc, "expected constant expression");
4445 return false;
4446 }
4447
4448 getTargetStreamer().emitInst(Value->getValue());
4449
4450 if (getLexer().is(AsmToken::EndOfStatement))
4451 break;
4452
4453 if (getLexer().isNot(AsmToken::Comma)) {
4454 //Error(Loc, "unexpected token in directive");
4455 return false;
4456 }
4457
4458 Parser.Lex(); // Eat comma.
4459 }
4460
4461 Parser.Lex();
4462 return false;
4463 }
4464
4465 // parseDirectiveTLSDescCall:
4466 // ::= .tlsdesccall symbol
parseDirectiveTLSDescCall(SMLoc L)4467 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4468 StringRef Name;
4469 if (getParser().parseIdentifier(Name))
4470 //return Error(L, "expected symbol after directive");
4471 return true;
4472
4473 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4474 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4475 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4476
4477 MCInst Inst;
4478 Inst.setOpcode(AArch64::TLSDESCCALL);
4479 Inst.addOperand(MCOperand::createExpr(Expr));
4480
4481 unsigned int KsError;
4482 getParser().getStreamer().EmitInstruction(Inst, getSTI(), KsError);
4483 return false;
4484 }
4485
4486 /// ::= .loh <lohName | lohId> label1, ..., labelN
4487 /// The number of arguments depends on the loh identifier.
parseDirectiveLOH(StringRef IDVal,SMLoc Loc)4488 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc)
4489 {
4490 if (IDVal != MCLOHDirectiveName())
4491 return true;
4492 MCLOHType Kind;
4493 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4494 if (getParser().getTok().isNot(AsmToken::Integer))
4495 //return TokError("expected an identifier or a number in directive");
4496 return true;
4497 // We successfully get a numeric value for the identifier.
4498 // Check if it is valid.
4499 bool valid;
4500 int64_t Id = getParser().getTok().getIntVal(valid);
4501 if (!valid)
4502 return MatchOperand_ParseFail;
4503 if (Id <= -1U && !isValidMCLOHType(Id))
4504 //return TokError("invalid numeric identifier in directive");
4505 return true;
4506 Kind = (MCLOHType)Id;
4507 } else {
4508 StringRef Name = getTok().getIdentifier();
4509 // We successfully parse an identifier.
4510 // Check if it is a recognized one.
4511 int Id = MCLOHNameToId(Name);
4512
4513 if (Id == -1)
4514 //return TokError("invalid identifier in directive");
4515 return true;
4516 Kind = (MCLOHType)Id;
4517 }
4518 // Consume the identifier.
4519 Lex();
4520 // Get the number of arguments of this LOH.
4521 int NbArgs = MCLOHIdToNbArgs(Kind);
4522
4523 assert(NbArgs != -1 && "Invalid number of arguments");
4524
4525 SmallVector<MCSymbol *, 3> Args;
4526 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4527 StringRef Name;
4528 if (getParser().parseIdentifier(Name))
4529 //return TokError("expected identifier in directive");
4530 return true;
4531 Args.push_back(getContext().getOrCreateSymbol(Name));
4532
4533 if (Idx + 1 == NbArgs)
4534 break;
4535 if (getLexer().isNot(AsmToken::Comma))
4536 //return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4537 return true;
4538 Lex();
4539 }
4540 if (getLexer().isNot(AsmToken::EndOfStatement))
4541 //return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4542 return true;
4543
4544 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4545 return false;
4546 }
4547
4548 /// parseDirectiveLtorg
4549 /// ::= .ltorg | .pool
parseDirectiveLtorg(SMLoc L)4550 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4551 getTargetStreamer().emitCurrentConstantPool();
4552 return false;
4553 }
4554
/// parseDirectiveReq
///   ::= name .req registername
/// Records "name" as an alias for a scalar or vector register in
/// RegisterReqs so later operand parsing can resolve it.
/// NOTE(review): unlike the other directive parsers in this file, the
/// error paths here return false and only the success path returns true;
/// confirm against the caller before changing this convention.
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L)
{
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  //SMLoc SRegLoc = getLoc();
  unsigned RegNum = tryParseRegister();
  bool IsVector = false;

  // Not a scalar register name: fall back to a vector register. An alias
  // target must not carry a type specifier (Kind stays empty on success).
  if (RegNum == static_cast<unsigned>(-1)) {
    StringRef Kind;
    RegNum = tryMatchVectorRegister(Kind, false);
    if (!Kind.empty()) {
      //Error(SRegLoc, "vector register without type specifier expected");
      return false;
    }
    IsVector = true;
  }

  // Neither a scalar nor a vector register parsed: skip the statement.
  if (RegNum == static_cast<unsigned>(-1)) {
    Parser.eatToEndOfStatement();
    //Error(SRegLoc, "register name or alias expected");
    return false;
  }

  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    //Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
    Parser.eatToEndOfStatement();
    return false;
  }

  Parser.Lex(); // Consume the EndOfStatement

  // First definition wins: re-registering the same name with a different
  // target only warns and keeps the original mapping.
  auto pair = std::make_pair(IsVector, RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");

  return true;
}
4596
4597 /// parseDirectiveUneq
4598 /// ::= .unreq registername
parseDirectiveUnreq(SMLoc L)4599 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L)
4600 {
4601 MCAsmParser &Parser = getParser();
4602 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4603 //Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4604 Parser.eatToEndOfStatement();
4605 return false;
4606 }
4607 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4608 Parser.Lex(); // Eat the identifier.
4609 return false;
4610 }
4611
/// Decompose an operand expression into its AArch64-specific (ELF) variant
/// kind, its Darwin symbol-reference kind, and a constant addend.
///
/// Returns true when Expr has one of the supported shapes: a bare symbol
/// reference, or sym +/- constant, optionally wrapped in an AArch64MCExpr.
/// The out-parameters may also be written on failing paths, so callers
/// must check the return value before relying on them.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Peel off a target-specific wrapper first and record its kind.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Otherwise only a binary expression can still match.
  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
  if (!BE)
    return false;

  // The LHS must be the symbol; its kind is recorded before the remaining
  // checks, so DarwinRefKind can be set even when this function fails.
  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
  if (!SE)
    return false;
  DarwinRefKind = SE->getKind();

  // Only sym + const and sym - const are representable here.
  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)
    return false;

  // See if the addend is a constant, otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
  if (!AddendExpr)
    return false;

  Addend = AddendExpr->getValue();
  if (BE->getOpcode() == MCBinaryExpr::Sub)
    Addend = -Addend;

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
4661
4662 /// Force static initialization.
LLVMInitializeAArch64AsmParser()4663 extern "C" void LLVMInitializeAArch64AsmParser() {
4664 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4665 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4666 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4667 }
4668
4669 #define GET_REGISTER_MATCHER
4670 #define GET_SUBTARGET_FEATURE_NAME
4671 #define GET_MATCHER_IMPLEMENTATION
4672 #include "AArch64GenAsmMatcher.inc"
4673
4674 // Define this matcher function after the auto-generated include so we
4675 // have the match class enum definitions.
validateTargetOperandClass(MCParsedAsmOperand & AsmOp,unsigned Kind)4676 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4677 unsigned Kind) {
4678 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4679 // If the kind is a token for a literal immediate, check if our asm
4680 // operand matches. This is for InstAliases which have a fixed-value
4681 // immediate in the syntax.
4682 int64_t ExpectedVal;
4683 switch (Kind) {
4684 default:
4685 return Match_InvalidOperand;
4686 case MCK__35_0:
4687 ExpectedVal = 0;
4688 break;
4689 case MCK__35_1:
4690 ExpectedVal = 1;
4691 break;
4692 case MCK__35_12:
4693 ExpectedVal = 12;
4694 break;
4695 case MCK__35_16:
4696 ExpectedVal = 16;
4697 break;
4698 case MCK__35_2:
4699 ExpectedVal = 2;
4700 break;
4701 case MCK__35_24:
4702 ExpectedVal = 24;
4703 break;
4704 case MCK__35_3:
4705 ExpectedVal = 3;
4706 break;
4707 case MCK__35_32:
4708 ExpectedVal = 32;
4709 break;
4710 case MCK__35_4:
4711 ExpectedVal = 4;
4712 break;
4713 case MCK__35_48:
4714 ExpectedVal = 48;
4715 break;
4716 case MCK__35_6:
4717 ExpectedVal = 6;
4718 break;
4719 case MCK__35_64:
4720 ExpectedVal = 64;
4721 break;
4722 case MCK__35_8:
4723 ExpectedVal = 8;
4724 break;
4725 }
4726 if (!Op.isImm())
4727 return Match_InvalidOperand;
4728 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4729 if (!CE)
4730 return Match_InvalidOperand;
4731 if (CE->getValue() == ExpectedVal)
4732 return Match_Success;
4733 return Match_InvalidOperand;
4734 }
4735
4736
/// Parse a consecutive even/odd general-purpose register pair operand
/// (two same-width W or X registers, the first with an even encoding and
/// the second with the next encoding up) and push the covering
/// sequence-pair super-register as a single operand.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands)
{
  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    //Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  int FirstReg = tryParseRegister();
  if (FirstReg == -1) {
    return MatchOperand_ParseFail;
  }
  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // Both registers of the pair must come from the same (W or X) class.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    //Error(S, "expected first even register of a "
    //      "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The first register's hardware encoding must be even.
  if (FirstEncoding & 0x1) {
    //Error(S, "expected first even register of a "
    //      "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  //SMLoc M = getLoc();
  if (getParser().getTok().isNot(AsmToken::Comma)) {
    //Error(M, "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  //SMLoc E = getLoc();
  int SecondReg = tryParseRegister();
  if (SecondReg ==-1) {
    return MatchOperand_ParseFail;
  }

  // The second register must have the next encoding up and belong to the
  // same register class as the first.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    //Error(E,"expected second odd register of a "
    //     "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Translate the even register into the covering sequence-pair
  // super-register from the generated register tables.
  unsigned Pair = 0;
  if(isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
                                               getContext()));

  return MatchOperand_Success;
}
4809