//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMFeatures.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMInstPrinter.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "TargetInfo/ARMTargetInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCAsmParserUtils.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/ARMEHABI.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/TargetParser/TargetParser.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "asm-parser"

using namespace llvm;

namespace llvm {
struct ARMInstrTable {
  MCInstrDesc Insts[4445];
  MCOperandInfo OperandInfo[3026];
  MCPhysReg ImplicitOps[130];
};
extern const ARMInstrTable ARMDescs;
} // end namespace llvm

namespace {

enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };

static cl::opt<ImplicitItModeTy> ImplicitItMode(
    "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
    cl::desc("Allow conditional instructions outside of an IT block"),
    cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
                          "Accept in both ISAs, emit implicit ITs in Thumb"),
               clEnumValN(ImplicitItModeTy::Never, "never",
                          "Warn in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
                          "Accept in ARM, reject in Thumb"),
               clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
                          "Warn in ARM, emit implicit ITs in Thumb")));

static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
                                        cl::init(false));

enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  // Position==0 means we're not in an IT block at all. Position==1
  // means we want the first state bit, which is always 0 (Then).
  // Position==2 means we want the second state bit, stored at bit 3
  // of Mask, and so on downwards. So (5 - Position) will shift the
  // right bit down to bit 0, including the always-0 bit at bit 4 for
  // the mandatory initial Then.
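  // For example, Mask == 0b0110 ("ITTE" in this representation): Position==2
  // reads bit 3 (0, so Then) and Position==3 reads bit 2 (1, so Else).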
  return (Mask >> (5 - Position) & 1);
}

class UnwindContext {
  using Locs = SmallVector<SMLoc, 4>;

  MCAsmParser &Parser;
  Locs FnStartLocs;
  Locs CantUnwindLocs;
  Locs PersonalityLocs;
  Locs PersonalityIndexLocs;
  Locs HandlerDataLocs;
  int FPReg;

public:
  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}

  bool hasFnStart() const { return !FnStartLocs.empty(); }
  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }

  bool hasPersonality() const {
    return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
  }

  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }

  void saveFPReg(int Reg) { FPReg = Reg; }
  int getFPReg() const { return FPReg; }

  void emitFnStartLocNotes() const {
    for (const SMLoc &Loc : FnStartLocs)
      Parser.Note(Loc, ".fnstart was specified here");
  }

  void emitCantUnwindLocNotes() const {
    for (const SMLoc &Loc : CantUnwindLocs)
      Parser.Note(Loc, ".cantunwind was specified here");
  }

  void emitHandlerDataLocNotes() const {
    for (const SMLoc &Loc : HandlerDataLocs)
      Parser.Note(Loc, ".handlerdata was specified here");
  }

  void emitPersonalityLocNotes() const {
    for (Locs::const_iterator PI = PersonalityLocs.begin(),
                              PE = PersonalityLocs.end(),
                              PII = PersonalityIndexLocs.begin(),
                              PIE = PersonalityIndexLocs.end();
         PI != PE || PII != PIE;) {
      if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
        Parser.Note(*PI++, ".personality was specified here");
      else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
        Parser.Note(*PII++, ".personalityindex was specified here");
      else
        llvm_unreachable(".personality and .personalityindex cannot be "
                         "at the same location");
    }
  }

  void reset() {
    FnStartLocs = Locs();
    CantUnwindLocs = Locs();
    PersonalityLocs = Locs();
    HandlerDataLocs = Locs();
    PersonalityIndexLocs = Locs();
    FPReg = ARM::SP;
  }
};

// Various sets of ARM instruction mnemonics which are used by the asm parser
class ARMMnemonicSets {
  StringSet<> CDE;
  StringSet<> CDEWithVPTSuffix;
public:
  ARMMnemonicSets(const MCSubtargetInfo &STI);

  /// Returns true iff a given mnemonic is a CDE instruction
  bool isCDEInstr(StringRef Mnemonic) {
    // Quick check before searching the set
    if (!Mnemonic.starts_with("cx") && !Mnemonic.starts_with("vcx"))
      return false;
    return CDE.count(Mnemonic);
  }

  /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
  /// (possibly with a predication suffix "e" or "t")
  bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
    if (!Mnemonic.starts_with("vcx"))
      return false;
    return CDEWithVPTSuffix.count(Mnemonic);
  }

  /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
  /// (possibly with a condition suffix)
  bool isITPredicableCDEInstr(StringRef Mnemonic) {
    if (!Mnemonic.starts_with("cx"))
      return false;
    return Mnemonic.starts_with("cx1a") || Mnemonic.starts_with("cx1da") ||
           Mnemonic.starts_with("cx2a") || Mnemonic.starts_with("cx2da") ||
           Mnemonic.starts_with("cx3a") || Mnemonic.starts_with("cx3da");
  }

  /// Return true iff a given mnemonic is an integer CDE instruction with
  /// dual-register destination
  bool isCDEDualRegInstr(StringRef Mnemonic) {
    if (!Mnemonic.starts_with("cx"))
      return false;
    return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
           Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
           Mnemonic == "cx3d" || Mnemonic == "cx3da";
  }
};

ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
  for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
                             "cx2", "cx2a", "cx2d", "cx2da",
                             "cx3", "cx3a", "cx3d", "cx3da", })
    CDE.insert(Mnemonic);
  for (StringRef Mnemonic :
       {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
    CDE.insert(Mnemonic);
    CDEWithVPTSuffix.insert(Mnemonic);
    CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
    CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
  }
}

class ARMAsmParser : public MCTargetAsmParser {
  const MCRegisterInfo *MRI;
  UnwindContext UC;
  ARMMnemonicSets MS;

  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Map of register aliases created via the .req directive.
  StringMap<unsigned> RegisterReqs;

  bool NextSymbolIsThumb;

  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '0'  condition as indicated in IT ('then').
                              //   '1'  inverse of condition ('else').
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)
                              // Note that this does not have the same encoding
                              // as in the IT instruction, which also depends
                              // on the low bit of the condition code.
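                              // For example, "ITTE" is stored here as 0b0110:
                              // bit 3 = Then, bit 2 = Else, bit 1 = the
                              // terminating 1, giving 4 - 1 = 3 instructions.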

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,4], with 0 being the IT
                              // instruction itself. Initialized according to
                              // count of instructions in block.  ~0U if no
                              // active IT block.

    bool IsExplicit;          // true  - The IT instruction was present in the
                              //         input, we should not modify it.
                              // false - The IT instruction was added
                              //         implicitly, we can extend it if that
                              //         would be legal.
  } ITState;

  SmallVector<MCInst, 4> PendingConditionalInsts;

  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(MCOperand::createImm(ITState.Cond));
    ITInst.addOperand(MCOperand::createImm(ITState.Mask));
    Out.emitInstruction(ITInst, getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.emitInstruction(Inst, getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }

  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  bool lastInITBlock() {
    return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
  }

  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  // Rewind the state of the current IT block, removing the last slot from it.
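  // For example, rewinding Mask == 0b0110 ("ITTE") yields 0b0100 ("ITT").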
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    unsigned NewMask = 0;
    NewMask |= ITState.Mask & (0xC << TZ);
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }

  // Return the low-subreg of a given Q register.
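  // For example, Q1 maps to D2.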
  unsigned getDRegFromQReg(unsigned QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  // Get the condition code corresponding to the current IT block slot.
  ARMCC::CondCodes currentITCond() {
    unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
    return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
  }

  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
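  // The first slot's condition is the block's base condition, so inverting it
  // means flipping ITState.Cond; later slots are inverted by toggling their
  // bit in the mask.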
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used).
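  // (The terminating 1 in the mask has reached bit 0.)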
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }

  // Extend the current implicit IT block to have one more slot with the given
  // condition code.
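  // For example, extending a one-slot block (Mask == 0b1000) with the same
  // condition gives 0b0100 ("ITT"); with the opposite condition, 0b1100
  // ("ITE").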
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond != ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }

  // Create a new implicit IT block with a dummy condition code.
  void startImplicitITBlock() {
    assert(!inITBlock());
    ITState.Cond = ARMCC::AL;
    ITState.Mask = 8;
    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

  // Create a new explicit IT block with the given condition and mask.
  // The mask should be in the format used in ARMOperand and
  // MCOperand, with a 1 implying 'e', regardless of the low bit of
  // the condition.
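  // For example, "ITET" arrives here as Mask == 0b1010 whether the condition
  // is EQ or NE.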
  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
    assert(!inITBlock());
    ITState.Cond = Cond;
    ITState.Mask = Mask;
    ITState.CurPosition = 0;
    ITState.IsExplicit = true;
  }

  struct {
    unsigned Mask : 4;
    unsigned CurPosition;
  } VPTState;
  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
  void forwardVPTPosition() {
    if (!inVPTBlock()) return;
    unsigned TZ = llvm::countr_zero(VPTState.Mask);
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }

  void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Error(L, Msg, Range);
  }

  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo, bool IsARPop = false);
  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo);

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
                         bool AllowRAAC = false);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parseImmExpr(int64_t &Out);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
  bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
  bool parseDirectiveSEHSaveSP(SMLoc L);
  bool parseDirectiveSEHSaveFRegs(SMLoc L);
  bool parseDirectiveSEHSaveLR(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
  bool parseDirectiveSEHNop(SMLoc L, bool Wide);
  bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHCustom(SMLoc L);

  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                          unsigned &PredicationCode,
                          unsigned &VPTPredicationCode, bool &CarrySetting,
                          unsigned &ProcessorIMod, StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
                             StringRef FullInst, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode,
                             bool &CanAcceptVPTPredicationCode);
  bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);

  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
                                     OperandVector &Operands);
  bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().hasFeature(ARM::ModeThumb);
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool hasThumb() const {
    return getSTI().hasFeature(ARM::HasV4TOps);
  }

  bool hasThumb2() const {
    return getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool hasV6Ops() const {
    return getSTI().hasFeature(ARM::HasV6Ops);
  }

  bool hasV6T2Ops() const {
    return getSTI().hasFeature(ARM::HasV6T2Ops);
  }

  bool hasV6MOps() const {
    return getSTI().hasFeature(ARM::HasV6MOps);
  }

  bool hasV7Ops() const {
    return getSTI().hasFeature(ARM::HasV7Ops);
  }

  bool hasV8Ops() const {
    return getSTI().hasFeature(ARM::HasV8Ops);
  }

  bool hasV8MBaseline() const {
    return getSTI().hasFeature(ARM::HasV8MBaselineOps);
  }

  bool hasV8MMainline() const {
    return getSTI().hasFeature(ARM::HasV8MMainlineOps);
  }
  bool hasV8_1MMainline() const {
    return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
  }
  bool hasMVE() const {
    return getSTI().hasFeature(ARM::HasMVEIntegerOps);
  }
  bool hasMVEFloat() const {
    return getSTI().hasFeature(ARM::HasMVEFloatOps);
  }
  bool hasCDE() const {
    return getSTI().hasFeature(ARM::HasCDEOps);
  }
  bool has8MSecExt() const {
    return getSTI().hasFeature(ARM::Feature8MSecExt);
  }

  bool hasARM() const {
    return !getSTI().hasFeature(ARM::FeatureNoARM);
  }

  bool hasDSP() const {
    return getSTI().hasFeature(ARM::FeatureDSP);
  }

  bool hasD32() const {
    return getSTI().hasFeature(ARM::FeatureD32);
  }

  bool hasV8_1aOps() const {
    return getSTI().hasFeature(ARM::HasV8_1aOps);
  }

  bool hasRAS() const {
    return getSTI().hasFeature(ARM::FeatureRAS);
  }

  void SwitchMode() {
    MCSubtargetInfo &STI = copySTI();
    auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }

  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().hasFeature(ARM::FeatureMClass);
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  ParseStatus parseITCondCode(OperandVector &);
  ParseStatus parseCoprocNumOperand(OperandVector &);
  ParseStatus parseCoprocRegOperand(OperandVector &);
  ParseStatus parseCoprocOptionOperand(OperandVector &);
  ParseStatus parseMemBarrierOptOperand(OperandVector &);
  ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
  ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
  ParseStatus parseProcIFlagsOperand(OperandVector &);
  ParseStatus parseMSRMaskOperand(OperandVector &);
  ParseStatus parseBankedRegOperand(OperandVector &);
  ParseStatus parsePKHImm(OperandVector &O, StringRef Op, int Low, int High);
  ParseStatus parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  ParseStatus parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  ParseStatus parseSetEndImm(OperandVector &);
  ParseStatus parseShifterImm(OperandVector &);
  ParseStatus parseRotImm(OperandVector &);
  ParseStatus parseModImm(OperandVector &);
  ParseStatus parseBitfield(OperandVector &);
  ParseStatus parsePostIdxReg(OperandVector &);
  ParseStatus parseAM3Offset(OperandVector &);
  ParseStatus parseFPImm(OperandVector &);
  ParseStatus parseVectorList(OperandVector &);
  ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                              SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
  void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);

  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool isITBlockTerminator(MCInst &Inst) const;
  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
                        bool Load, bool ARMMode, bool Writeback);

public:
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
    Match_RequiresV8,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
               const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
    MCAsmParserExtension::Initialize(Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Add build attributes based on the selected target.
    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    VPTState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
                            SmallVectorImpl<NearMissInfo> &NearMisses,
                            bool MatchingInlineAsm, bool &EmitInITBlock,
                            MCStreamer &Out);

  struct NearMissMessage {
    SMLoc Loc;
    SmallString<128> Message;
  };

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);

  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                        SmallVectorImpl<NearMissMessage> &NearMissesOut,
                        SMLoc IDLoc, OperandVector &Operands);
  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
                        OperandVector &Operands);

  void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;

  void onLabelParsed(MCSymbol *Symbol) override;
};

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// operand.
class ARMOperand : public MCParsedAsmOperand {
  enum KindTy {
    k_CondCode,
    k_VPTPred,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_InstSyncBarrierOpt,
    k_TraceSyncBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_BankedReg,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_RegisterListWithAPSR,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_FPSRegisterListWithVPR,
    k_FPDRegisterListWithVPR,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_ModifiedImmediate,
    k_ConstantPoolImmediate,
    k_BitfieldDescriptor,
    k_Token,
  } Kind;

  SMLoc StartLoc, EndLoc, AlignmentLoc;
  SmallVector<unsigned, 8> Registers;

  struct CCOp {
    ARMCC::CondCodes Val;
  };

  struct VCCOp {
    ARMVCC::VPTCodes Val;
  };

  struct CopOp {
    unsigned Val;
  };

  struct CoprocOptionOp {
    unsigned Val;
  };

  struct ITMaskOp {
    unsigned Mask:4;
  };

  struct MBOptOp {
    ARM_MB::MemBOpt Val;
  };

  struct ISBOptOp {
    ARM_ISB::InstSyncBOpt Val;
  };

  struct TSBOptOp {
    ARM_TSB::TraceSyncBOpt Val;
  };

  struct IFlagsOp {
    ARM_PROC::IFlags Val;
  };

  struct MMaskOp {
    unsigned Val;
  };

  struct BankedRegOp {
    unsigned Val;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNum;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned LaneIndex;
    bool isDoubleSpaced;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  /// Combined record for all forms of ARM address expressions.
  struct MemoryOp {
    unsigned BaseRegNum;
    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    // was specified.
    const MCExpr *OffsetImm;  // Offset immediate value
    unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
    unsigned ShiftImm;        // shift for OffsetReg.
    unsigned Alignment;       // 0 = no alignment specified
    // n = alignment in bytes (2, 4, 8, 16, or 32)
    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
  };

  struct PostIdxRegOp {
    unsigned RegNum;
    bool isAdd;
    ARM_AM::ShiftOpc ShiftTy;
    unsigned ShiftImm;
  };

  struct ShifterImmOp {
    bool isASR;
    unsigned Imm;
  };

  struct RegShiftedRegOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftReg;
    unsigned ShiftImm;
  };

  struct RegShiftedImmOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftImm;
  };

  struct RotImmOp {
    unsigned Imm;
  };

  struct ModImmOp {
    unsigned Bits;
    unsigned Rot;
  };

  struct BitfieldOp {
    unsigned LSB;
    unsigned Width;
  };

  union {
    struct CCOp CC;
    struct VCCOp VCC;
    struct CopOp Cop;
    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ISBOptOp ISBOpt;
    struct TSBOptOp TSBOpt;
    struct ITMaskOp ITMask;
    struct IFlagsOp IFlags;
    struct MMaskOp MMask;
    struct BankedRegOp BankedReg;
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct MemoryOp Memory;
    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct ModImmOp ModImm;
    struct BitfieldOp Bitfield;
  };

public:
  ARMOperand(KindTy K) : Kind(K) {}

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  ARMVCC::VPTCodes getVPTPred() const {
    assert(isVPTPred() && "Invalid access!");
    return VCC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
            Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
            Kind == k_FPSRegisterListWithVPR ||
            Kind == k_FPDRegisterListWithVPR) &&
           "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
    assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
    return TSBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }

  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }

  bool isARMBranchTarget() const {
    if (!isImm()) return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
      return CE->getValue() % 4 == 0;
    return true;
  }


  bool isThumbBranchTarget() const {
    if (!isImm()) return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
      return CE->getValue() % 2 == 0;
    return true;
  }

  // checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits
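  // (e.g. isUnsignedOffset<8, 2> accepts multiples of 4 in [0, 1020])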
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits
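  // (e.g. isSignedOffset<11, 1> accepts even values in [-2048, 2046])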
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is an offset suitable for the LE /
  // LETP instructions in Arm v8.1M
  bool isLEOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      return Val < 0 && Val >= -4094 && (Val & 1) == 0;
    }
    return false;
  }

  // checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is represented
  // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
  // relocatable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isGPRMem()) {
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();
      else
        return false;
    }
    else return false;
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }

  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }

  template<int64_t N, int64_t M>
  bool isImmediate() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= N && Value <= M;
  }

  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= N && Value <= M;
  }
  template<int64_t N, int64_t M>
  bool isImmediateS2() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 1) == 0) && Value >= N && Value <= M;
  }
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  bool isImm0_255Expr() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE)
      return true;
    int64_t Value = CE->getValue();
    return isUInt<8>(Value);
  }

  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }

  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }

  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }

  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }

  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(~Value) != -1;
  }

  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }

  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }

  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
      return isGPRMem() || isMVEMem();
  }
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isGPRMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }

  template<unsigned Min, unsigned Max>
  bool isPowerTwoInRange() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
           Value <= Max;
  }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }

  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }

  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  bool isMemPCRelImm12() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    return false;
  }

  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }

  bool isAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory16() const {
1526     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1527       return true;
1528     return isMemNoOffset(false, 0);
1529   }
1530 
isAlignedMemory32() const1531   bool isAlignedMemory32() const {
1532     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1533       return true;
1534     return isMemNoOffset(false, 0);
1535   }
1536 
isDupAlignedMemory32() const1537   bool isDupAlignedMemory32() const {
1538     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1539       return true;
1540     return isMemNoOffset(false, 0);
1541   }
1542 
isAlignedMemory64() const1543   bool isAlignedMemory64() const {
1544     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1545       return true;
1546     return isMemNoOffset(false, 0);
1547   }
1548 
isDupAlignedMemory64() const1549   bool isDupAlignedMemory64() const {
1550     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1551       return true;
1552     return isMemNoOffset(false, 0);
1553   }
1554 
isAlignedMemory64or128() const1555   bool isAlignedMemory64or128() const {
1556     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1557       return true;
1558     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1559       return true;
1560     return isMemNoOffset(false, 0);
1561   }
1562 
isDupAlignedMemory64or128() const1563   bool isDupAlignedMemory64or128() const {
1564     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1565       return true;
1566     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1567       return true;
1568     return isMemNoOffset(false, 0);
1569   }
1570 
isAlignedMemory64or128or256() const1571   bool isAlignedMemory64or128or256() const {
1572     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1573       return true;
1574     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1575       return true;
1576     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1577       return true;
1578     return isMemNoOffset(false, 0);
1579   }
1580 
isAddrMode2() const1581   bool isAddrMode2() const {
1582     if (!isGPRMem() || Memory.Alignment != 0) return false;
1583     // Check for register offset.
1584     if (Memory.OffsetRegNum) return true;
1585     // Immediate offset in range [-4095, 4095].
1586     if (!Memory.OffsetImm) return true;
1587     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1588       int64_t Val = CE->getValue();
1589       return Val > -4096 && Val < 4096;
1590     }
1591     return false;
1592   }
1593 
isAM2OffsetImm() const1594   bool isAM2OffsetImm() const {
1595     if (!isImm()) return false;
1596     // Immediate offset in range [-4095, 4095].
1597     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1598     if (!CE) return false;
1599     int64_t Val = CE->getValue();
1600     return (Val == std::numeric_limits<int32_t>::min()) ||
1601            (Val > -4096 && Val < 4096);
1602   }
1603 
isAddrMode3() const1604   bool isAddrMode3() const {
1605     // If we have an immediate that's not a constant, treat it as a label
1606     // reference needing a fixup. If it is a constant, it's something else
1607     // and we reject it.
1608     if (isImm() && !isa<MCConstantExpr>(getImm()))
1609       return true;
1610     if (!isGPRMem() || Memory.Alignment != 0) return false;
1611     // No shifts are legal for AM3.
1612     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1613     // Check for register offset.
1614     if (Memory.OffsetRegNum) return true;
1615     // Immediate offset in range [-255, 255].
1616     if (!Memory.OffsetImm) return true;
1617     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1618       int64_t Val = CE->getValue();
1619       // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1620       // we have to check for this too.
1621       return (Val > -256 && Val < 256) ||
1622              Val == std::numeric_limits<int32_t>::min();
1623     }
1624     return false;
1625   }
1626 
isAM3Offset() const1627   bool isAM3Offset() const {
1628     if (isPostIdxReg())
1629       return true;
1630     if (!isImm())
1631       return false;
1632     // Immediate offset in range [-255, 255].
1633     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1634     if (!CE) return false;
1635     int64_t Val = CE->getValue();
1636     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1637     return (Val > -256 && Val < 256) ||
1638            Val == std::numeric_limits<int32_t>::min();
1639   }
1640 
isAddrMode5() const1641   bool isAddrMode5() const {
1642     // If we have an immediate that's not a constant, treat it as a label
1643     // reference needing a fixup. If it is a constant, it's something else
1644     // and we reject it.
1645     if (isImm() && !isa<MCConstantExpr>(getImm()))
1646       return true;
1647     if (!isGPRMem() || Memory.Alignment != 0) return false;
1648     // Check for register offset.
1649     if (Memory.OffsetRegNum) return false;
1650     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1651     if (!Memory.OffsetImm) return true;
1652     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1653       int64_t Val = CE->getValue();
1654       return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1655              Val == std::numeric_limits<int32_t>::min();
1656     }
1657     return false;
1658   }
1659 
isAddrMode5FP16() const1660   bool isAddrMode5FP16() const {
1661     // If we have an immediate that's not a constant, treat it as a label
1662     // reference needing a fixup. If it is a constant, it's something else
1663     // and we reject it.
1664     if (isImm() && !isa<MCConstantExpr>(getImm()))
1665       return true;
1666     if (!isGPRMem() || Memory.Alignment != 0) return false;
1667     // Check for register offset.
1668     if (Memory.OffsetRegNum) return false;
1669     // Immediate offset in range [-510, 510] and a multiple of 2.
1670     if (!Memory.OffsetImm) return true;
1671     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1672       int64_t Val = CE->getValue();
1673       return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1674              Val == std::numeric_limits<int32_t>::min();
1675     }
1676     return false;
1677   }
1678 
isMemTBB() const1679   bool isMemTBB() const {
1680     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1681         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1682       return false;
1683     return true;
1684   }
1685 
isMemTBH() const1686   bool isMemTBH() const {
1687     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1688         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1689         Memory.Alignment != 0 )
1690       return false;
1691     return true;
1692   }
1693 
isMemRegOffset() const1694   bool isMemRegOffset() const {
1695     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1696       return false;
1697     return true;
1698   }
1699 
isT2MemRegOffset() const1700   bool isT2MemRegOffset() const {
1701     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1702         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1703       return false;
1704     // Only lsl #{0, 1, 2, 3} allowed.
1705     if (Memory.ShiftType == ARM_AM::no_shift)
1706       return true;
1707     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1708       return false;
1709     return true;
1710   }
1711 
isMemThumbRR() const1712   bool isMemThumbRR() const {
1713     // Thumb reg+reg addressing is simple. Just two registers, a base and
1714     // an offset. No shifts, negations or any other complicating factors.
1715     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1716         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1717       return false;
1718     return isARMLowRegister(Memory.BaseRegNum) &&
1719       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1720   }
1721 
isMemThumbRIs4() const1722   bool isMemThumbRIs4() const {
1723     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1724         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1725       return false;
1726     // Immediate offset, multiple of 4 in range [0, 124].
1727     if (!Memory.OffsetImm) return true;
1728     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1729       int64_t Val = CE->getValue();
1730       return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1731     }
1732     return false;
1733   }
1734 
isMemThumbRIs2() const1735   bool isMemThumbRIs2() const {
1736     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1737         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1738       return false;
1739     // Immediate offset, multiple of 4 in range [0, 62].
1740     if (!Memory.OffsetImm) return true;
1741     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1742       int64_t Val = CE->getValue();
1743       return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1744     }
1745     return false;
1746   }
1747 
isMemThumbRIs1() const1748   bool isMemThumbRIs1() const {
1749     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1750         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1751       return false;
1752     // Immediate offset in range [0, 31].
1753     if (!Memory.OffsetImm) return true;
1754     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1755       int64_t Val = CE->getValue();
1756       return Val >= 0 && Val <= 31;
1757     }
1758     return false;
1759   }
1760 
isMemThumbSPI() const1761   bool isMemThumbSPI() const {
1762     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1763         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1764       return false;
1765     // Immediate offset, multiple of 4 in range [0, 1020].
1766     if (!Memory.OffsetImm) return true;
1767     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1768       int64_t Val = CE->getValue();
1769       return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1770     }
1771     return false;
1772   }
1773 
isMemImm8s4Offset() const1774   bool isMemImm8s4Offset() const {
1775     // If we have an immediate that's not a constant, treat it as a label
1776     // reference needing a fixup. If it is a constant, it's something else
1777     // and we reject it.
1778     if (isImm() && !isa<MCConstantExpr>(getImm()))
1779       return true;
1780     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1781       return false;
1782     // Immediate offset a multiple of 4 in range [-1020, 1020].
1783     if (!Memory.OffsetImm) return true;
1784     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1785       int64_t Val = CE->getValue();
1786       // Special case, #-0 is std::numeric_limits<int32_t>::min().
1787       return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1788              Val == std::numeric_limits<int32_t>::min();
1789     }
1790     return false;
1791   }
1792 
isMemImm7s4Offset() const1793   bool isMemImm7s4Offset() const {
1794     // If we have an immediate that's not a constant, treat it as a label
1795     // reference needing a fixup. If it is a constant, it's something else
1796     // and we reject it.
1797     if (isImm() && !isa<MCConstantExpr>(getImm()))
1798       return true;
1799     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1800         !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1801             Memory.BaseRegNum))
1802       return false;
1803     // Immediate offset a multiple of 4 in range [-508, 508].
1804     if (!Memory.OffsetImm) return true;
1805     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1806       int64_t Val = CE->getValue();
1807       // Special case, #-0 is INT32_MIN.
1808       return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1809     }
1810     return false;
1811   }
1812 
isMemImm0_1020s4Offset() const1813   bool isMemImm0_1020s4Offset() const {
1814     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1815       return false;
1816     // Immediate offset a multiple of 4 in range [0, 1020].
1817     if (!Memory.OffsetImm) return true;
1818     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1819       int64_t Val = CE->getValue();
1820       return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1821     }
1822     return false;
1823   }
1824 
isMemImm8Offset() const1825   bool isMemImm8Offset() const {
1826     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1827       return false;
1828     // Base reg of PC isn't allowed for these encodings.
1829     if (Memory.BaseRegNum == ARM::PC) return false;
1830     // Immediate offset in range [-255, 255].
1831     if (!Memory.OffsetImm) return true;
1832     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1833       int64_t Val = CE->getValue();
1834       return (Val == std::numeric_limits<int32_t>::min()) ||
1835              (Val > -256 && Val < 256);
1836     }
1837     return false;
1838   }
1839 
1840   template<unsigned Bits, unsigned RegClassID>
isMemImm7ShiftedOffset() const1841   bool isMemImm7ShiftedOffset() const {
1842     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1843         !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
1844       return false;
1845 
1846     // Expect an immediate offset equal to an element of the range
1847     // [-127, 127], shifted left by Bits.
1848 
1849     if (!Memory.OffsetImm) return true;
1850     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1851       int64_t Val = CE->getValue();
1852 
1853       // INT32_MIN is a special-case value (indicating the encoding with
1854       // zero offset and the subtract bit set)
1855       if (Val == INT32_MIN)
1856         return true;
1857 
1858       unsigned Divisor = 1U << Bits;
1859 
1860       // Check that the low bits are zero
1861       if (Val % Divisor != 0)
1862         return false;
1863 
1864       // Check that the remaining offset is within range.
1865       Val /= Divisor;
1866       return (Val >= -127 && Val <= 127);
1867     }
1868     return false;
1869   }
1870 
isMemRegRQOffset() const1871   template <int shift> bool isMemRegRQOffset() const {
1872     if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
1873       return false;
1874 
1875     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1876             Memory.BaseRegNum))
1877       return false;
1878     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1879             Memory.OffsetRegNum))
1880       return false;
1881 
1882     if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
1883       return false;
1884 
1885     if (shift > 0 &&
1886         (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
1887       return false;
1888 
1889     return true;
1890   }
1891 
isMemRegQOffset() const1892   template <int shift> bool isMemRegQOffset() const {
1893     if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1894       return false;
1895 
1896     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1897             Memory.BaseRegNum))
1898       return false;
1899 
1900     if (!Memory.OffsetImm)
1901       return true;
1902     static_assert(shift < 56,
1903                   "Such that we dont shift by a value higher than 62");
1904     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1905       int64_t Val = CE->getValue();
1906 
1907       // The value must be a multiple of (1 << shift)
1908       if ((Val & ((1U << shift) - 1)) != 0)
1909         return false;
1910 
1911       // And be in the right range, depending on the amount that it is shifted
1912       // by.  Shift 0, is equal to 7 unsigned bits, the sign bit is set
1913       // separately.
1914       int64_t Range = (1U << (7 + shift)) - 1;
1915       return (Val == INT32_MIN) || (Val > -Range && Val < Range);
1916     }
1917     return false;
1918   }
1919 
isMemPosImm8Offset() const1920   bool isMemPosImm8Offset() const {
1921     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1922       return false;
1923     // Immediate offset in range [0, 255].
1924     if (!Memory.OffsetImm) return true;
1925     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1926       int64_t Val = CE->getValue();
1927       return Val >= 0 && Val < 256;
1928     }
1929     return false;
1930   }
1931 
isMemNegImm8Offset() const1932   bool isMemNegImm8Offset() const {
1933     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1934       return false;
1935     // Base reg of PC isn't allowed for these encodings.
1936     if (Memory.BaseRegNum == ARM::PC) return false;
1937     // Immediate offset in range [-255, -1].
1938     if (!Memory.OffsetImm) return false;
1939     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1940       int64_t Val = CE->getValue();
1941       return (Val == std::numeric_limits<int32_t>::min()) ||
1942              (Val > -256 && Val < 0);
1943     }
1944     return false;
1945   }
1946 
isMemUImm12Offset() const1947   bool isMemUImm12Offset() const {
1948     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1949       return false;
1950     // Immediate offset in range [0, 4095].
1951     if (!Memory.OffsetImm) return true;
1952     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1953       int64_t Val = CE->getValue();
1954       return (Val >= 0 && Val < 4096);
1955     }
1956     return false;
1957   }
1958 
isMemImm12Offset() const1959   bool isMemImm12Offset() const {
1960     // If we have an immediate that's not a constant, treat it as a label
1961     // reference needing a fixup. If it is a constant, it's something else
1962     // and we reject it.
1963 
1964     if (isImm() && !isa<MCConstantExpr>(getImm()))
1965       return true;
1966 
1967     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1968       return false;
1969     // Immediate offset in range [-4095, 4095].
1970     if (!Memory.OffsetImm) return true;
1971     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1972       int64_t Val = CE->getValue();
1973       return (Val > -4096 && Val < 4096) ||
1974              (Val == std::numeric_limits<int32_t>::min());
1975     }
1976     // If we have an immediate that's not a constant, treat it as a
1977     // symbolic expression needing a fixup.
1978     return true;
1979   }
1980 
isConstPoolAsmImm() const1981   bool isConstPoolAsmImm() const {
1982     // Delay processing of Constant Pool Immediate, this will turn into
1983     // a constant. Match no other operand
1984     return (isConstantPoolImm());
1985   }
1986 
isPostIdxImm8() const1987   bool isPostIdxImm8() const {
1988     if (!isImm()) return false;
1989     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1990     if (!CE) return false;
1991     int64_t Val = CE->getValue();
1992     return (Val > -256 && Val < 256) ||
1993            (Val == std::numeric_limits<int32_t>::min());
1994   }
1995 
isPostIdxImm8s4() const1996   bool isPostIdxImm8s4() const {
1997     if (!isImm()) return false;
1998     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1999     if (!CE) return false;
2000     int64_t Val = CE->getValue();
2001     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2002            (Val == std::numeric_limits<int32_t>::min());
2003   }
2004 
isMSRMask() const2005   bool isMSRMask() const { return Kind == k_MSRMask; }
isBankedReg() const2006   bool isBankedReg() const { return Kind == k_BankedReg; }
isProcIFlags() const2007   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2008 
2009   // NEON operands.
isSingleSpacedVectorList() const2010   bool isSingleSpacedVectorList() const {
2011     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2012   }
2013 
isDoubleSpacedVectorList() const2014   bool isDoubleSpacedVectorList() const {
2015     return Kind == k_VectorList && VectorList.isDoubleSpaced;
2016   }
2017 
isVecListOneD() const2018   bool isVecListOneD() const {
2019     if (!isSingleSpacedVectorList()) return false;
2020     return VectorList.Count == 1;
2021   }
2022 
isVecListTwoMQ() const2023   bool isVecListTwoMQ() const {
2024     return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2025            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2026                VectorList.RegNum);
2027   }
2028 
isVecListDPair() const2029   bool isVecListDPair() const {
2030     if (!isSingleSpacedVectorList()) return false;
2031     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2032               .contains(VectorList.RegNum));
2033   }
2034 
isVecListThreeD() const2035   bool isVecListThreeD() const {
2036     if (!isSingleSpacedVectorList()) return false;
2037     return VectorList.Count == 3;
2038   }
2039 
isVecListFourD() const2040   bool isVecListFourD() const {
2041     if (!isSingleSpacedVectorList()) return false;
2042     return VectorList.Count == 4;
2043   }
2044 
isVecListDPairSpaced() const2045   bool isVecListDPairSpaced() const {
2046     if (Kind != k_VectorList) return false;
2047     if (isSingleSpacedVectorList()) return false;
2048     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2049               .contains(VectorList.RegNum));
2050   }
2051 
isVecListThreeQ() const2052   bool isVecListThreeQ() const {
2053     if (!isDoubleSpacedVectorList()) return false;
2054     return VectorList.Count == 3;
2055   }
2056 
isVecListFourQ() const2057   bool isVecListFourQ() const {
2058     if (!isDoubleSpacedVectorList()) return false;
2059     return VectorList.Count == 4;
2060   }
2061 
isVecListFourMQ() const2062   bool isVecListFourMQ() const {
2063     return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2064            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2065                VectorList.RegNum);
2066   }
2067 
isSingleSpacedVectorAllLanes() const2068   bool isSingleSpacedVectorAllLanes() const {
2069     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2070   }
2071 
isDoubleSpacedVectorAllLanes() const2072   bool isDoubleSpacedVectorAllLanes() const {
2073     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2074   }
2075 
isVecListOneDAllLanes() const2076   bool isVecListOneDAllLanes() const {
2077     if (!isSingleSpacedVectorAllLanes()) return false;
2078     return VectorList.Count == 1;
2079   }
2080 
isVecListDPairAllLanes() const2081   bool isVecListDPairAllLanes() const {
2082     if (!isSingleSpacedVectorAllLanes()) return false;
2083     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2084               .contains(VectorList.RegNum));
2085   }
2086 
isVecListDPairSpacedAllLanes() const2087   bool isVecListDPairSpacedAllLanes() const {
2088     if (!isDoubleSpacedVectorAllLanes()) return false;
2089     return VectorList.Count == 2;
2090   }
2091 
isVecListThreeDAllLanes() const2092   bool isVecListThreeDAllLanes() const {
2093     if (!isSingleSpacedVectorAllLanes()) return false;
2094     return VectorList.Count == 3;
2095   }
2096 
isVecListThreeQAllLanes() const2097   bool isVecListThreeQAllLanes() const {
2098     if (!isDoubleSpacedVectorAllLanes()) return false;
2099     return VectorList.Count == 3;
2100   }
2101 
isVecListFourDAllLanes() const2102   bool isVecListFourDAllLanes() const {
2103     if (!isSingleSpacedVectorAllLanes()) return false;
2104     return VectorList.Count == 4;
2105   }
2106 
isVecListFourQAllLanes() const2107   bool isVecListFourQAllLanes() const {
2108     if (!isDoubleSpacedVectorAllLanes()) return false;
2109     return VectorList.Count == 4;
2110   }
2111 
isSingleSpacedVectorIndexed() const2112   bool isSingleSpacedVectorIndexed() const {
2113     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2114   }
2115 
isDoubleSpacedVectorIndexed() const2116   bool isDoubleSpacedVectorIndexed() const {
2117     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2118   }
2119 
isVecListOneDByteIndexed() const2120   bool isVecListOneDByteIndexed() const {
2121     if (!isSingleSpacedVectorIndexed()) return false;
2122     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2123   }
2124 
isVecListOneDHWordIndexed() const2125   bool isVecListOneDHWordIndexed() const {
2126     if (!isSingleSpacedVectorIndexed()) return false;
2127     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2128   }
2129 
isVecListOneDWordIndexed() const2130   bool isVecListOneDWordIndexed() const {
2131     if (!isSingleSpacedVectorIndexed()) return false;
2132     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2133   }
2134 
isVecListTwoDByteIndexed() const2135   bool isVecListTwoDByteIndexed() const {
2136     if (!isSingleSpacedVectorIndexed()) return false;
2137     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2138   }
2139 
isVecListTwoDHWordIndexed() const2140   bool isVecListTwoDHWordIndexed() const {
2141     if (!isSingleSpacedVectorIndexed()) return false;
2142     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2143   }
2144 
isVecListTwoQWordIndexed() const2145   bool isVecListTwoQWordIndexed() const {
2146     if (!isDoubleSpacedVectorIndexed()) return false;
2147     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2148   }
2149 
isVecListTwoQHWordIndexed() const2150   bool isVecListTwoQHWordIndexed() const {
2151     if (!isDoubleSpacedVectorIndexed()) return false;
2152     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2153   }
2154 
isVecListTwoDWordIndexed() const2155   bool isVecListTwoDWordIndexed() const {
2156     if (!isSingleSpacedVectorIndexed()) return false;
2157     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2158   }
2159 
isVecListThreeDByteIndexed() const2160   bool isVecListThreeDByteIndexed() const {
2161     if (!isSingleSpacedVectorIndexed()) return false;
2162     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2163   }
2164 
isVecListThreeDHWordIndexed() const2165   bool isVecListThreeDHWordIndexed() const {
2166     if (!isSingleSpacedVectorIndexed()) return false;
2167     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2168   }
2169 
isVecListThreeQWordIndexed() const2170   bool isVecListThreeQWordIndexed() const {
2171     if (!isDoubleSpacedVectorIndexed()) return false;
2172     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2173   }
2174 
isVecListThreeQHWordIndexed() const2175   bool isVecListThreeQHWordIndexed() const {
2176     if (!isDoubleSpacedVectorIndexed()) return false;
2177     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2178   }
2179 
isVecListThreeDWordIndexed() const2180   bool isVecListThreeDWordIndexed() const {
2181     if (!isSingleSpacedVectorIndexed()) return false;
2182     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2183   }
2184 
isVecListFourDByteIndexed() const2185   bool isVecListFourDByteIndexed() const {
2186     if (!isSingleSpacedVectorIndexed()) return false;
2187     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2188   }
2189 
isVecListFourDHWordIndexed() const2190   bool isVecListFourDHWordIndexed() const {
2191     if (!isSingleSpacedVectorIndexed()) return false;
2192     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2193   }
2194 
isVecListFourQWordIndexed() const2195   bool isVecListFourQWordIndexed() const {
2196     if (!isDoubleSpacedVectorIndexed()) return false;
2197     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2198   }
2199 
isVecListFourQHWordIndexed() const2200   bool isVecListFourQHWordIndexed() const {
2201     if (!isDoubleSpacedVectorIndexed()) return false;
2202     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2203   }
2204 
isVecListFourDWordIndexed() const2205   bool isVecListFourDWordIndexed() const {
2206     if (!isSingleSpacedVectorIndexed()) return false;
2207     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2208   }
2209 
isVectorIndex() const2210   bool isVectorIndex() const { return Kind == k_VectorIndex; }
2211 
2212   template <unsigned NumLanes>
isVectorIndexInRange() const2213   bool isVectorIndexInRange() const {
2214     if (Kind != k_VectorIndex) return false;
2215     return VectorIndex.Val < NumLanes;
2216   }
2217 
isVectorIndex8() const2218   bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
isVectorIndex16() const2219   bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
isVectorIndex32() const2220   bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
isVectorIndex64() const2221   bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2222 
2223   template<int PermittedValue, int OtherPermittedValue>
isMVEPairVectorIndex() const2224   bool isMVEPairVectorIndex() const {
2225     if (Kind != k_VectorIndex) return false;
2226     return VectorIndex.Val == PermittedValue ||
2227            VectorIndex.Val == OtherPermittedValue;
2228   }
2229 
isNEONi8splat() const2230   bool isNEONi8splat() const {
2231     if (!isImm()) return false;
2232     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2233     // Must be a constant.
2234     if (!CE) return false;
2235     int64_t Value = CE->getValue();
2236     // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2237     // value.
2238     return Value >= 0 && Value < 256;
2239   }
2240 
isNEONi16splat() const2241   bool isNEONi16splat() const {
2242     if (isNEONByteReplicate(2))
2243       return false; // Leave that for bytes replication and forbid by default.
2244     if (!isImm())
2245       return false;
2246     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2247     // Must be a constant.
2248     if (!CE) return false;
2249     unsigned Value = CE->getValue();
2250     return ARM_AM::isNEONi16splat(Value);
2251   }
2252 
isNEONi16splatNot() const2253   bool isNEONi16splatNot() const {
2254     if (!isImm())
2255       return false;
2256     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2257     // Must be a constant.
2258     if (!CE) return false;
2259     unsigned Value = CE->getValue();
2260     return ARM_AM::isNEONi16splat(~Value & 0xffff);
2261   }
2262 
isNEONi32splat() const2263   bool isNEONi32splat() const {
2264     if (isNEONByteReplicate(4))
2265       return false; // Leave that for bytes replication and forbid by default.
2266     if (!isImm())
2267       return false;
2268     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2269     // Must be a constant.
2270     if (!CE) return false;
2271     unsigned Value = CE->getValue();
2272     return ARM_AM::isNEONi32splat(Value);
2273   }
2274 
isNEONi32splatNot() const2275   bool isNEONi32splatNot() const {
2276     if (!isImm())
2277       return false;
2278     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2279     // Must be a constant.
2280     if (!CE) return false;
2281     unsigned Value = CE->getValue();
2282     return ARM_AM::isNEONi32splat(~Value);
2283   }
2284 
isValidNEONi32vmovImm(int64_t Value)2285   static bool isValidNEONi32vmovImm(int64_t Value) {
2286     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2287     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2288     return ((Value & 0xffffffffffffff00) == 0) ||
2289            ((Value & 0xffffffffffff00ff) == 0) ||
2290            ((Value & 0xffffffffff00ffff) == 0) ||
2291            ((Value & 0xffffffff00ffffff) == 0) ||
2292            ((Value & 0xffffffffffff00ff) == 0xff) ||
2293            ((Value & 0xffffffffff00ffff) == 0xffff);
2294   }
2295 
isNEONReplicate(unsigned Width,unsigned NumElems,bool Inv) const2296   bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2297     assert((Width == 8 || Width == 16 || Width == 32) &&
2298            "Invalid element width");
2299     assert(NumElems * Width <= 64 && "Invalid result width");
2300 
2301     if (!isImm())
2302       return false;
2303     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2304     // Must be a constant.
2305     if (!CE)
2306       return false;
2307     int64_t Value = CE->getValue();
2308     if (!Value)
2309       return false; // Don't bother with zero.
2310     if (Inv)
2311       Value = ~Value;
2312 
2313     uint64_t Mask = (1ull << Width) - 1;
2314     uint64_t Elem = Value & Mask;
2315     if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2316       return false;
2317     if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2318       return false;
2319 
2320     for (unsigned i = 1; i < NumElems; ++i) {
2321       Value >>= Width;
2322       if ((Value & Mask) != Elem)
2323         return false;
2324     }
2325     return true;
2326   }
2327 
isNEONByteReplicate(unsigned NumBytes) const2328   bool isNEONByteReplicate(unsigned NumBytes) const {
2329     return isNEONReplicate(8, NumBytes, false);
2330   }
2331 
checkNeonReplicateArgs(unsigned FromW,unsigned ToW)2332   static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2333     assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2334            "Invalid source width");
2335     assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2336            "Invalid destination width");
2337     assert(FromW < ToW && "ToW is not less than FromW");
2338   }
2339 
2340   template<unsigned FromW, unsigned ToW>
isNEONmovReplicate() const2341   bool isNEONmovReplicate() const {
2342     checkNeonReplicateArgs(FromW, ToW);
2343     if (ToW == 64 && isNEONi64splat())
2344       return false;
2345     return isNEONReplicate(FromW, ToW / FromW, false);
2346   }
2347 
2348   template<unsigned FromW, unsigned ToW>
isNEONinvReplicate() const2349   bool isNEONinvReplicate() const {
2350     checkNeonReplicateArgs(FromW, ToW);
2351     return isNEONReplicate(FromW, ToW / FromW, true);
2352   }
2353 
isNEONi32vmov() const2354   bool isNEONi32vmov() const {
2355     if (isNEONByteReplicate(4))
2356       return false; // Let it to be classified as byte-replicate case.
2357     if (!isImm())
2358       return false;
2359     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2360     // Must be a constant.
2361     if (!CE)
2362       return false;
2363     return isValidNEONi32vmovImm(CE->getValue());
2364   }
2365 
isNEONi32vmovNeg() const2366   bool isNEONi32vmovNeg() const {
2367     if (!isImm()) return false;
2368     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2369     // Must be a constant.
2370     if (!CE) return false;
2371     return isValidNEONi32vmovImm(~CE->getValue());
2372   }
2373 
isNEONi64splat() const2374   bool isNEONi64splat() const {
2375     if (!isImm()) return false;
2376     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2377     // Must be a constant.
2378     if (!CE) return false;
2379     uint64_t Value = CE->getValue();
2380     // i64 value with each byte being either 0 or 0xff.
2381     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2382       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2383     return true;
2384   }
2385 
2386   template<int64_t Angle, int64_t Remainder>
isComplexRotation() const2387   bool isComplexRotation() const {
2388     if (!isImm()) return false;
2389 
2390     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2391     if (!CE) return false;
2392     uint64_t Value = CE->getValue();
2393 
2394     return (Value % Angle == Remainder && Value <= 270);
2395   }
2396 
isMVELongShift() const2397   bool isMVELongShift() const {
2398     if (!isImm()) return false;
2399     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2400     // Must be a constant.
2401     if (!CE) return false;
2402     uint64_t Value = CE->getValue();
2403     return Value >= 1 && Value <= 32;
2404   }
2405 
isMveSaturateOp() const2406   bool isMveSaturateOp() const {
2407     if (!isImm()) return false;
2408     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2409     if (!CE) return false;
2410     uint64_t Value = CE->getValue();
2411     return Value == 48 || Value == 64;
2412   }
2413 
isITCondCodeNoAL() const2414   bool isITCondCodeNoAL() const {
2415     if (!isITCondCode()) return false;
2416     ARMCC::CondCodes CC = getCondCode();
2417     return CC != ARMCC::AL;
2418   }
2419 
isITCondCodeRestrictedI() const2420   bool isITCondCodeRestrictedI() const {
2421     if (!isITCondCode())
2422       return false;
2423     ARMCC::CondCodes CC = getCondCode();
2424     return CC == ARMCC::EQ || CC == ARMCC::NE;
2425   }
2426 
isITCondCodeRestrictedS() const2427   bool isITCondCodeRestrictedS() const {
2428     if (!isITCondCode())
2429       return false;
2430     ARMCC::CondCodes CC = getCondCode();
2431     return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2432            CC == ARMCC::GE;
2433   }
2434 
isITCondCodeRestrictedU() const2435   bool isITCondCodeRestrictedU() const {
2436     if (!isITCondCode())
2437       return false;
2438     ARMCC::CondCodes CC = getCondCode();
2439     return CC == ARMCC::HS || CC == ARMCC::HI;
2440   }
2441 
isITCondCodeRestrictedFP() const2442   bool isITCondCodeRestrictedFP() const {
2443     if (!isITCondCode())
2444       return false;
2445     ARMCC::CondCodes CC = getCondCode();
2446     return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2447            CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2448   }
2449 
addExpr(MCInst & Inst,const MCExpr * Expr) const2450   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2451     // Add as immediates when possible.  Null MCExpr = 0.
2452     if (!Expr)
2453       Inst.addOperand(MCOperand::createImm(0));
2454     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2455       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2456     else
2457       Inst.addOperand(MCOperand::createExpr(Expr));
2458   }
2459 
addARMBranchTargetOperands(MCInst & Inst,unsigned N) const2460   void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2461     assert(N == 1 && "Invalid number of operands!");
2462     addExpr(Inst, getImm());
2463   }
2464 
addThumbBranchTargetOperands(MCInst & Inst,unsigned N) const2465   void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2466     assert(N == 1 && "Invalid number of operands!");
2467     addExpr(Inst, getImm());
2468   }
2469 
addCondCodeOperands(MCInst & Inst,unsigned N) const2470   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2471     assert(N == 2 && "Invalid number of operands!");
2472     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2473     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2474     Inst.addOperand(MCOperand::createReg(RegNum));
2475   }
2476 
addVPTPredNOperands(MCInst & Inst,unsigned N) const2477   void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2478     assert(N == 3 && "Invalid number of operands!");
2479     Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2480     unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2481     Inst.addOperand(MCOperand::createReg(RegNum));
2482     Inst.addOperand(MCOperand::createReg(0));
2483   }
2484 
addVPTPredROperands(MCInst & Inst,unsigned N) const2485   void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2486     assert(N == 4 && "Invalid number of operands!");
2487     addVPTPredNOperands(Inst, N-1);
2488     unsigned RegNum;
2489     if (getVPTPred() == ARMVCC::None) {
2490       RegNum = 0;
2491     } else {
2492       unsigned NextOpIndex = Inst.getNumOperands();
2493       const MCInstrDesc &MCID =
2494           ARMDescs.Insts[ARM::INSTRUCTION_LIST_END - 1 - Inst.getOpcode()];
2495       int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2496       assert(TiedOp >= 0 &&
2497              "Inactive register in vpred_r is not tied to an output!");
2498       RegNum = Inst.getOperand(TiedOp).getReg();
2499     }
2500     Inst.addOperand(MCOperand::createReg(RegNum));
2501   }
2502 
addCoprocNumOperands(MCInst & Inst,unsigned N) const2503   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2504     assert(N == 1 && "Invalid number of operands!");
2505     Inst.addOperand(MCOperand::createImm(getCoproc()));
2506   }
2507 
addCoprocRegOperands(MCInst & Inst,unsigned N) const2508   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2509     assert(N == 1 && "Invalid number of operands!");
2510     Inst.addOperand(MCOperand::createImm(getCoproc()));
2511   }
2512 
addCoprocOptionOperands(MCInst & Inst,unsigned N) const2513   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2514     assert(N == 1 && "Invalid number of operands!");
2515     Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2516   }
2517 
addITMaskOperands(MCInst & Inst,unsigned N) const2518   void addITMaskOperands(MCInst &Inst, unsigned N) const {
2519     assert(N == 1 && "Invalid number of operands!");
2520     Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2521   }
2522 
addITCondCodeOperands(MCInst & Inst,unsigned N) const2523   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2524     assert(N == 1 && "Invalid number of operands!");
2525     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2526   }
2527 
addITCondCodeInvOperands(MCInst & Inst,unsigned N) const2528   void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2529     assert(N == 1 && "Invalid number of operands!");
2530     Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2531   }
2532 
addCCOutOperands(MCInst & Inst,unsigned N) const2533   void addCCOutOperands(MCInst &Inst, unsigned N) const {
2534     assert(N == 1 && "Invalid number of operands!");
2535     Inst.addOperand(MCOperand::createReg(getReg()));
2536   }
2537 
addRegOperands(MCInst & Inst,unsigned N) const2538   void addRegOperands(MCInst &Inst, unsigned N) const {
2539     assert(N == 1 && "Invalid number of operands!");
2540     Inst.addOperand(MCOperand::createReg(getReg()));
2541   }
2542 
addRegShiftedRegOperands(MCInst & Inst,unsigned N) const2543   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2544     assert(N == 3 && "Invalid number of operands!");
2545     assert(isRegShiftedReg() &&
2546            "addRegShiftedRegOperands() on non-RegShiftedReg!");
2547     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2548     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2549     Inst.addOperand(MCOperand::createImm(
2550       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2551   }
2552 
addRegShiftedImmOperands(MCInst & Inst,unsigned N) const2553   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2554     assert(N == 2 && "Invalid number of operands!");
2555     assert(isRegShiftedImm() &&
2556            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2557     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2558     // Shift of #32 is encoded as 0 where permitted
2559     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2560     Inst.addOperand(MCOperand::createImm(
2561       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2562   }
2563 
addShifterImmOperands(MCInst & Inst,unsigned N) const2564   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2565     assert(N == 1 && "Invalid number of operands!");
2566     Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2567                                          ShifterImm.Imm));
2568   }
2569 
addRegListOperands(MCInst & Inst,unsigned N) const2570   void addRegListOperands(MCInst &Inst, unsigned N) const {
2571     assert(N == 1 && "Invalid number of operands!");
2572     const SmallVectorImpl<unsigned> &RegList = getRegList();
2573     for (unsigned Reg : RegList)
2574       Inst.addOperand(MCOperand::createReg(Reg));
2575   }
2576 
addRegListWithAPSROperands(MCInst & Inst,unsigned N) const2577   void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2578     assert(N == 1 && "Invalid number of operands!");
2579     const SmallVectorImpl<unsigned> &RegList = getRegList();
2580     for (unsigned Reg : RegList)
2581       Inst.addOperand(MCOperand::createReg(Reg));
2582   }
2583 
addDPRRegListOperands(MCInst & Inst,unsigned N) const2584   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2585     addRegListOperands(Inst, N);
2586   }
2587 
addSPRRegListOperands(MCInst & Inst,unsigned N) const2588   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2589     addRegListOperands(Inst, N);
2590   }
2591 
addFPSRegListWithVPROperands(MCInst & Inst,unsigned N) const2592   void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2593     addRegListOperands(Inst, N);
2594   }
2595 
addFPDRegListWithVPROperands(MCInst & Inst,unsigned N) const2596   void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2597     addRegListOperands(Inst, N);
2598   }
2599 
addRotImmOperands(MCInst & Inst,unsigned N) const2600   void addRotImmOperands(MCInst &Inst, unsigned N) const {
2601     assert(N == 1 && "Invalid number of operands!");
2602     // Encoded as val>>3. The printer handles display as 8, 16, 24.
2603     Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2604   }
2605 
addModImmOperands(MCInst & Inst,unsigned N) const2606   void addModImmOperands(MCInst &Inst, unsigned N) const {
2607     assert(N == 1 && "Invalid number of operands!");
2608 
2609     // Support for fixups (MCFixup)
2610     if (isImm())
2611       return addImmOperands(Inst, N);
2612 
2613     Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2614   }
2615 
addModImmNotOperands(MCInst & Inst,unsigned N) const2616   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2617     assert(N == 1 && "Invalid number of operands!");
2618     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2619     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2620     Inst.addOperand(MCOperand::createImm(Enc));
2621   }
2622 
addModImmNegOperands(MCInst & Inst,unsigned N) const2623   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2624     assert(N == 1 && "Invalid number of operands!");
2625     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2626     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2627     Inst.addOperand(MCOperand::createImm(Enc));
2628   }
2629 
addThumbModImmNeg8_255Operands(MCInst & Inst,unsigned N) const2630   void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2631     assert(N == 1 && "Invalid number of operands!");
2632     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2633     uint32_t Val = -CE->getValue();
2634     Inst.addOperand(MCOperand::createImm(Val));
2635   }
2636 
addThumbModImmNeg1_7Operands(MCInst & Inst,unsigned N) const2637   void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2638     assert(N == 1 && "Invalid number of operands!");
2639     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2640     uint32_t Val = -CE->getValue();
2641     Inst.addOperand(MCOperand::createImm(Val));
2642   }
2643 
addBitfieldOperands(MCInst & Inst,unsigned N) const2644   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2645     assert(N == 1 && "Invalid number of operands!");
2646     // Munge the lsb/width into a bitfield mask.
2647     unsigned lsb = Bitfield.LSB;
2648     unsigned width = Bitfield.Width;
2649     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2650     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2651                       (32 - (lsb + width)));
2652     Inst.addOperand(MCOperand::createImm(Mask));
2653   }
2654 
addImmOperands(MCInst & Inst,unsigned N) const2655   void addImmOperands(MCInst &Inst, unsigned N) const {
2656     assert(N == 1 && "Invalid number of operands!");
2657     addExpr(Inst, getImm());
2658   }
2659 
addFBits16Operands(MCInst & Inst,unsigned N) const2660   void addFBits16Operands(MCInst &Inst, unsigned N) const {
2661     assert(N == 1 && "Invalid number of operands!");
2662     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2663     Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2664   }
2665 
addFBits32Operands(MCInst & Inst,unsigned N) const2666   void addFBits32Operands(MCInst &Inst, unsigned N) const {
2667     assert(N == 1 && "Invalid number of operands!");
2668     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2669     Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2670   }
2671 
addFPImmOperands(MCInst & Inst,unsigned N) const2672   void addFPImmOperands(MCInst &Inst, unsigned N) const {
2673     assert(N == 1 && "Invalid number of operands!");
2674     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2675     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2676     Inst.addOperand(MCOperand::createImm(Val));
2677   }
2678 
addImm8s4Operands(MCInst & Inst,unsigned N) const2679   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2680     assert(N == 1 && "Invalid number of operands!");
2681     // FIXME: We really want to scale the value here, but the LDRD/STRD
2682     // instruction don't encode operands that way yet.
2683     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2684     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2685   }
2686 
addImm7s4Operands(MCInst & Inst,unsigned N) const2687   void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2688     assert(N == 1 && "Invalid number of operands!");
2689     // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2690     // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }

  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }

  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
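    // A constant offset is stored scaled down by 4 (the low two bits are
    // implicit in the encoding); a symbolic offset is kept as an expression
    // so a fixup can resolve it later.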
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
      return;
    }
    const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
    Inst.addOperand(MCOperand::createExpr(SR));
  }

  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
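    // The operand is either a bare label/immediate or a PC-relative memory
    // reference; in both cases only the offset value (or expression) is
    // emitted, never the base register.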
    if (isImm()) {
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
      if (CE) {
        Inst.addOperand(MCOperand::createImm(CE->getValue()));
        return;
      }
      const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
      Inst.addOperand(MCOperand::createExpr(SR));
      return;
    }

    assert(isGPRMem() && "Unknown value type!");
    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
  }

  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
  }

  void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(isImm() && "Not an immediate!");

    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (!isa<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.Alignment));
  }

  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

addAddrMode2Operands(MCInst & Inst,unsigned N) const2940   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2941     assert(N == 3 && "Invalid number of operands!");
2942     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2943     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2944     if (!Memory.OffsetRegNum) {
2945       if (!Memory.OffsetImm)
2946         Inst.addOperand(MCOperand::createImm(0));
2947       else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
2948         int32_t Val = CE->getValue();
2949         ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2950         // Special case for #-0
2951         if (Val == std::numeric_limits<int32_t>::min())
2952           Val = 0;
2953         if (Val < 0)
2954           Val = -Val;
2955         Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2956         Inst.addOperand(MCOperand::createImm(Val));
2957       } else
2958         Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2959     } else {
2960       // For register offset, we encode the shift type and negation flag
2961       // here.
2962       int32_t Val =
2963           ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2964                             Memory.ShiftImm, Memory.ShiftType);
2965       Inst.addOperand(MCOperand::createImm(Val));
2966     }
2967   }
2968 
addAM2OffsetImmOperands(MCInst & Inst,unsigned N) const2969   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2970     assert(N == 2 && "Invalid number of operands!");
2971     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2972     assert(CE && "non-constant AM2OffsetImm operand!");
2973     int32_t Val = CE->getValue();
2974     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2975     // Special case for #-0
2976     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2977     if (Val < 0) Val = -Val;
2978     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2979     Inst.addOperand(MCOperand::createReg(0));
2980     Inst.addOperand(MCOperand::createImm(Val));
2981   }
2982 
addAddrMode3Operands(MCInst & Inst,unsigned N) const2983   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2984     assert(N == 3 && "Invalid number of operands!");
2985     // If we have an immediate that's not a constant, treat it as a label
2986     // reference needing a fixup. If it is a constant, it's something else
2987     // and we reject it.
2988     if (isImm()) {
2989       Inst.addOperand(MCOperand::createExpr(getImm()));
2990       Inst.addOperand(MCOperand::createReg(0));
2991       Inst.addOperand(MCOperand::createImm(0));
2992       return;
2993     }
2994 
2995     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2996     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2997     if (!Memory.OffsetRegNum) {
2998       if (!Memory.OffsetImm)
2999         Inst.addOperand(MCOperand::createImm(0));
3000       else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3001         int32_t Val = CE->getValue();
3002         ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3003         // Special case for #-0
3004         if (Val == std::numeric_limits<int32_t>::min())
3005           Val = 0;
3006         if (Val < 0)
3007           Val = -Val;
3008         Val = ARM_AM::getAM3Opc(AddSub, Val);
3009         Inst.addOperand(MCOperand::createImm(Val));
3010       } else
3011         Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3012     } else {
3013       // For register offset, we encode the shift type and negation flag
3014       // here.
3015       int32_t Val =
3016           ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
3017       Inst.addOperand(MCOperand::createImm(Val));
3018     }
3019   }
3020 
addAM3OffsetOperands(MCInst & Inst,unsigned N) const3021   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3022     assert(N == 2 && "Invalid number of operands!");
3023     if (Kind == k_PostIndexRegister) {
3024       int32_t Val =
3025         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3026       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3027       Inst.addOperand(MCOperand::createImm(Val));
3028       return;
3029     }
3030 
3031     // Constant offset.
3032     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3033     int32_t Val = CE->getValue();
3034     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3035     // Special case for #-0
3036     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3037     if (Val < 0) Val = -Val;
3038     Val = ARM_AM::getAM3Opc(AddSub, Val);
3039     Inst.addOperand(MCOperand::createReg(0));
3040     Inst.addOperand(MCOperand::createImm(Val));
3041   }
3042 
addAddrMode5Operands(MCInst & Inst,unsigned N) const3043   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3044     assert(N == 2 && "Invalid number of operands!");
3045     // If we have an immediate that's not a constant, treat it as a label
3046     // reference needing a fixup. If it is a constant, it's something else
3047     // and we reject it.
3048     if (isImm()) {
3049       Inst.addOperand(MCOperand::createExpr(getImm()));
3050       Inst.addOperand(MCOperand::createImm(0));
3051       return;
3052     }
3053 
3054     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3055     if (!Memory.OffsetImm)
3056       Inst.addOperand(MCOperand::createImm(0));
3057     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3058       // The lower two bits are always zero and as such are not encoded.
3059       int32_t Val = CE->getValue() / 4;
3060       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3061       // Special case for #-0
3062       if (Val == std::numeric_limits<int32_t>::min())
3063         Val = 0;
3064       if (Val < 0)
3065         Val = -Val;
3066       Val = ARM_AM::getAM5Opc(AddSub, Val);
3067       Inst.addOperand(MCOperand::createImm(Val));
3068     } else
3069       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3070   }

  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    // The lower bit is always zero and as such is not encoded.
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int32_t Val = CE->getValue() / 2;
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
      Inst.addOperand(MCOperand::createImm(Val));
    } else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // This is a container for the immediate that we will create the
    // constant pool entry from.
    addExpr(Inst, getConstantPoolImm());
  }

  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
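    // Thumb2 register offsets carry only a left-shift amount; there is no
    // add/sub or shift-type field to encode here.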
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    if (!Memory.OffsetImm)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }

  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
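    // Pack the magnitude into bits [7:0] and the add/sub flag into bit 8.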
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
  }

  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
  }

  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
  }

  void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // When we come here, the VectorList field will identify a range
    // of q-registers by its base register and length, and it will
    // have already been error-checked to be the expected length of
    // range and contain only q-regs in the range q0-q7. So we can
    // count on the base register being in the range q0-q6 (for 2
    // regs) or q0-q4 (for 4)
    //
    // The MVE instructions taking a register range of this kind will
    // need an operand in the MQQPR or MQQQQPR class, representing the
    // entire range as a unit. So we must translate into that class,
    // by finding the index of the base register in the MQPR reg
    // class, and returning the super-register at the corresponding
    // index in the target class.

    const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
    const MCRegisterClass *RC_out =
        (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
                                : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];

    unsigned I, E = RC_out->getNumRegs();
    for (I = 0; I < E; I++)
      if (RC_in->getRegister(I) == VectorList.RegNum)
        break;
    assert(I < E && "Invalid vector list start register!");

    Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
  }

  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(~Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
            Inst.getOpcode() == ARM::VMOVv16i8) &&
           "All instructions that want to replicate a non-zero byte "
           "must have been replaced with VMOVv8i8 or VMOVv16i8.");
    unsigned Value = CE->getValue();
    if (Inv)
      Value = ~Value;
    unsigned B = Value & 0xff;
    B |= 0xe00; // cmode = 0b1110
    Inst.addOperand(MCOperand::createImm(B));
  }

  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }

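  // Fold a 32-bit value into the NEON VMOV modified-immediate form: the
  // significant byte is shifted down and the cmode bits recording its byte
  // position (and whether the bytes below it are 0x00 or 0xff) are OR'd in.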
  static unsigned encodeNeonVMOVImmediate(unsigned Value) {
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    return Value;
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }

  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
            Inst.getOpcode() == ARM::VMOVv8i16 ||
            Inst.getOpcode() == ARM::VMVNv4i16 ||
            Inst.getOpcode() == ARM::VMVNv8i16) &&
           "All instructions that want to replicate a non-zero half-word "
           "must have been replaced with V{MOV,MVN}v{4,8}i16.");
    uint64_t Value = CE->getValue();
    unsigned Elem = Value & 0xffff;
    if (Elem >= 256)
      Elem = (Elem >> 8) | 0x200;
    Inst.addOperand(MCOperand::createImm(Elem));
  }

  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
            Inst.getOpcode() == ARM::VMOVv4i32 ||
            Inst.getOpcode() == ARM::VMVNv2i32 ||
            Inst.getOpcode() == ARM::VMVNv4i32) &&
           "All instructions that want to replicate a non-zero word "
           "must have been replaced with V{MOV,MVN}v{2,4}i32.");
    uint64_t Value = CE->getValue();
    unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
    Inst.addOperand(MCOperand::createImm(Elem));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
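    // Each byte of a valid i64 splat constant is either 0x00 or 0xff, so
    // collapse every byte down to a single bit of the 8-bit immediate.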
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
  }

  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
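    // Even rotations (0, 90, 180, 270) encode as the rotation divided by 90.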
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
  }

  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
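    // Odd rotations (90, 270) encode as 0 and 1 respectively.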
    Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
  }

  void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
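    // Saturation to 48 bits encodes as 1, saturation to 64 bits as 0.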
    Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
  }

  void print(raw_ostream &OS) const override;

  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
                                                    SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
                                                   SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_VPTPred);
    Op->VCC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
                                                        SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
                                               SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                        unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
                        SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                         unsigned ShiftImm, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
                                                      SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
                                                  SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
                                                  SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
    Op->ModImm.Bits = Bits;
    Op->ModImm.Rot = Rot;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert(Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

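    // The class of the first register decides between core, D- and S-register
    // lists; a trailing VPR or APSR selects the corresponding ...WithVPR or
    // ...WithAPSR kind.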
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
            Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPDRegisterListWithVPR;
      else
        Kind = k_DPRRegisterList;
    } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                   Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPSRegisterListWithVPR;
      else
        Kind = k_SPRRegisterList;
    }

    if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
      Kind = k_RegisterListWithAPSR;

    assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");

    auto Op = std::make_unique<ARMOperand>(Kind);
    for (const auto &P : Regs)
      Op->Registers.push_back(P.second);

    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
                                                      unsigned Count,
                                                      bool isDoubleSpaced,
                                                      SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
                           SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
                          bool isDoubleSpaced, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
                                               SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
            ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
            bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
    auto Op = std::make_unique<ARMOperand>(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    Op->AlignmentLoc = AlignmentLoc;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
                   unsigned ShiftImm, SMLoc S, SMLoc E) {
    auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
                                                         SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
    Op->ISBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
    Op->TSBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
                                                      SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
    auto Op = std::make_unique<ARMOperand>(k_BankedReg);
    Op->BankedReg.Val = Reg;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

print(raw_ostream & OS) const3885 void ARMOperand::print(raw_ostream &OS) const {
3886   auto RegName = [](MCRegister Reg) {
3887     if (Reg)
3888       return ARMInstPrinter::getRegisterName(Reg);
3889     else
3890       return "noreg";
3891   };
3892 
3893   switch (Kind) {
3894   case k_CondCode:
3895     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3896     break;
3897   case k_VPTPred:
3898     OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3899     break;
3900   case k_CCOut:
3901     OS << "<ccout " << RegName(getReg()) << ">";
3902     break;
3903   case k_ITCondMask: {
3904     static const char *const MaskStr[] = {
3905       "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3906       "(tt)",      "(ttet)", "(tte)", "(ttee)",
3907       "(t)",       "(tett)", "(tet)", "(tete)",
3908       "(te)",      "(teet)", "(tee)", "(teee)",
3909     };
3910     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3911     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3912     break;
3913   }
3914   case k_CoprocNum:
3915     OS << "<coprocessor number: " << getCoproc() << ">";
3916     break;
3917   case k_CoprocReg:
3918     OS << "<coprocessor register: " << getCoproc() << ">";
3919     break;
3920   case k_CoprocOption:
3921     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3922     break;
3923   case k_MSRMask:
3924     OS << "<mask: " << getMSRMask() << ">";
3925     break;
3926   case k_BankedReg:
3927     OS << "<banked reg: " << getBankedReg() << ">";
3928     break;
3929   case k_Immediate:
3930     OS << *getImm();
3931     break;
3932   case k_MemBarrierOpt:
3933     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3934     break;
3935   case k_InstSyncBarrierOpt:
3936     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3937     break;
3938   case k_TraceSyncBarrierOpt:
3939     OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3940     break;
3941   case k_Memory:
3942     OS << "<memory";
3943     if (Memory.BaseRegNum)
3944       OS << " base:" << RegName(Memory.BaseRegNum);
3945     if (Memory.OffsetImm)
3946       OS << " offset-imm:" << *Memory.OffsetImm;
3947     if (Memory.OffsetRegNum)
3948       OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3949          << RegName(Memory.OffsetRegNum);
3950     if (Memory.ShiftType != ARM_AM::no_shift) {
3951       OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3952       OS << " shift-imm:" << Memory.ShiftImm;
3953     }
3954     if (Memory.Alignment)
3955       OS << " alignment:" << Memory.Alignment;
3956     OS << ">";
3957     break;
3958   case k_PostIndexRegister:
3959     OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3960        << RegName(PostIdxReg.RegNum);
3961     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3962       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3963          << PostIdxReg.ShiftImm;
3964     OS << ">";
3965     break;
3966   case k_ProcIFlags: {
3967     OS << "<ARM_PROC::";
3968     unsigned IFlags = getProcIFlags();
3969     for (int i=2; i >= 0; --i)
3970       if (IFlags & (1 << i))
3971         OS << ARM_PROC::IFlagsToString(1 << i);
3972     OS << ">";
3973     break;
3974   }
3975   case k_Register:
3976     OS << "<register " << RegName(getReg()) << ">";
3977     break;
3978   case k_ShifterImmediate:
3979     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3980        << " #" << ShifterImm.Imm << ">";
3981     break;
3982   case k_ShiftedRegister:
3983     OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3984        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3985        << RegName(RegShiftedReg.ShiftReg) << ">";
3986     break;
3987   case k_ShiftedImmediate:
3988     OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3989        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3990        << RegShiftedImm.ShiftImm << ">";
3991     break;
3992   case k_RotateImmediate:
3993     OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3994     break;
3995   case k_ModifiedImmediate:
3996     OS << "<mod_imm #" << ModImm.Bits << ", #"
3997        <<  ModImm.Rot << ")>";
3998     break;
3999   case k_ConstantPoolImmediate:
4000     OS << "<constant_pool_imm #" << *getConstantPoolImm();
4001     break;
4002   case k_BitfieldDescriptor:
4003     OS << "<bitfield " << "lsb: " << Bitfield.LSB
4004        << ", width: " << Bitfield.Width << ">";
4005     break;
4006   case k_RegisterList:
4007   case k_RegisterListWithAPSR:
4008   case k_DPRRegisterList:
4009   case k_SPRRegisterList:
4010   case k_FPSRegisterListWithVPR:
4011   case k_FPDRegisterListWithVPR: {
4012     OS << "<register_list ";
4013 
4014     const SmallVectorImpl<unsigned> &RegList = getRegList();
4015     for (SmallVectorImpl<unsigned>::const_iterator
4016            I = RegList.begin(), E = RegList.end(); I != E; ) {
4017       OS << RegName(*I);
4018       if (++I < E) OS << ", ";
4019     }
4020 
4021     OS << ">";
4022     break;
4023   }
4024   case k_VectorList:
4025     OS << "<vector_list " << VectorList.Count << " * "
4026        << RegName(VectorList.RegNum) << ">";
4027     break;
4028   case k_VectorListAllLanes:
4029     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4030        << RegName(VectorList.RegNum) << ">";
4031     break;
4032   case k_VectorListIndexed:
4033     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4034        << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4035     break;
4036   case k_Token:
4037     OS << "'" << getToken() << "'";
4038     break;
4039   case k_VectorIndex:
4040     OS << "<vectorindex " << getVectorIndex() << ">";
4041     break;
4042   }
4043 }
4044 
4045 /// @name Auto-generated Match Functions
4046 /// {
4047 
4048 static unsigned MatchRegisterName(StringRef Name);
4049 
4050 /// }
4051 
4052 bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
4053                                  SMLoc &EndLoc) {
4054   const AsmToken &Tok = getParser().getTok();
4055   StartLoc = Tok.getLoc();
4056   EndLoc = Tok.getEndLoc();
4057   Reg = tryParseRegister();
4058 
4059   return Reg == (unsigned)-1;
4060 }
4061 
4062 ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
4063                                            SMLoc &EndLoc) {
4064   if (parseRegister(Reg, StartLoc, EndLoc))
4065     return ParseStatus::NoMatch;
4066   return ParseStatus::Success;
4067 }
4068 
4069 /// Try to parse a register name.  The token must be an Identifier when called,
4070 /// and if it is a register name the token is eaten and the register number is
4071 /// returned.  Otherwise return -1.
4072 int ARMAsmParser::tryParseRegister() {
4073   MCAsmParser &Parser = getParser();
4074   const AsmToken &Tok = Parser.getTok();
4075   if (Tok.isNot(AsmToken::Identifier)) return -1;
4076 
4077   std::string lowerCase = Tok.getString().lower();
4078   unsigned RegNum = MatchRegisterName(lowerCase);
4079   if (!RegNum) {
4080     RegNum = StringSwitch<unsigned>(lowerCase)
4081       .Case("r13", ARM::SP)
4082       .Case("r14", ARM::LR)
4083       .Case("r15", ARM::PC)
4084       .Case("ip", ARM::R12)
4085       // Additional register name aliases for 'gas' compatibility.
4086       .Case("a1", ARM::R0)
4087       .Case("a2", ARM::R1)
4088       .Case("a3", ARM::R2)
4089       .Case("a4", ARM::R3)
4090       .Case("v1", ARM::R4)
4091       .Case("v2", ARM::R5)
4092       .Case("v3", ARM::R6)
4093       .Case("v4", ARM::R7)
4094       .Case("v5", ARM::R8)
4095       .Case("v6", ARM::R9)
4096       .Case("v7", ARM::R10)
4097       .Case("v8", ARM::R11)
4098       .Case("sb", ARM::R9)
4099       .Case("sl", ARM::R10)
4100       .Case("fp", ARM::R11)
4101       .Default(0);
4102   }
4103   if (!RegNum) {
4104     // Check for aliases registered via .req. Canonicalize to lower case.
4105     // That's more consistent since register names are case insensitive, and
4106     // it's how the original entry was passed in from MC/MCParser/AsmParser.
4107     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4108     // If no match, return failure.
4109     if (Entry == RegisterReqs.end())
4110       return -1;
4111     Parser.Lex(); // Eat identifier token.
4112     return Entry->getValue();
4113   }
4114 
4115   // Some FPUs only have 16 D registers, so D16-D31 are invalid
4116   if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
4117     return -1;
4118 
4119   Parser.Lex(); // Eat identifier token.
4120 
4121   return RegNum;
4122 }
4123 
4124 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4125 // If a recoverable error occurs, return 1. If an irrecoverable error
4126 // occurs, return -1. An irrecoverable error is one where tokens have been
4127 // consumed in the process of trying to parse the shifter (i.e., when it is
4128 // indeed a shifter operand, but malformed).
4129 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4130   MCAsmParser &Parser = getParser();
4131   SMLoc S = Parser.getTok().getLoc();
4132   const AsmToken &Tok = Parser.getTok();
4133   if (Tok.isNot(AsmToken::Identifier))
4134     return -1;
4135 
4136   std::string lowerCase = Tok.getString().lower();
4137   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
4138       .Case("asl", ARM_AM::lsl)
4139       .Case("lsl", ARM_AM::lsl)
4140       .Case("lsr", ARM_AM::lsr)
4141       .Case("asr", ARM_AM::asr)
4142       .Case("ror", ARM_AM::ror)
4143       .Case("rrx", ARM_AM::rrx)
4144       .Default(ARM_AM::no_shift);
4145 
4146   if (ShiftTy == ARM_AM::no_shift)
4147     return 1;
4148 
4149   Parser.Lex(); // Eat the operator.
4150 
4151   // The source register for the shift has already been added to the
4152   // operand list, so we need to pop it off and combine it into the shifted
4153   // register operand instead.
4154   std::unique_ptr<ARMOperand> PrevOp(
4155       (ARMOperand *)Operands.pop_back_val().release());
4156   if (!PrevOp->isReg())
4157     return Error(PrevOp->getStartLoc(), "shift must be of a register");
4158   int SrcReg = PrevOp->getReg();
4159 
4160   SMLoc EndLoc;
4161   int64_t Imm = 0;
4162   int ShiftReg = 0;
4163   if (ShiftTy == ARM_AM::rrx) {
4164     // RRX doesn't have an explicit shift amount. The encoder expects
4165     // the shift register to be the same as the source register. Seems odd,
4166     // but OK.
4167     ShiftReg = SrcReg;
4168   } else {
4169     // Figure out if this is shifted by a constant or a register (for non-RRX).
4170     if (Parser.getTok().is(AsmToken::Hash) ||
4171         Parser.getTok().is(AsmToken::Dollar)) {
4172       Parser.Lex(); // Eat hash.
4173       SMLoc ImmLoc = Parser.getTok().getLoc();
4174       const MCExpr *ShiftExpr = nullptr;
4175       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4176         Error(ImmLoc, "invalid immediate shift value");
4177         return -1;
4178       }
4179       // The expression must be evaluatable as an immediate.
4180       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4181       if (!CE) {
4182         Error(ImmLoc, "invalid immediate shift value");
4183         return -1;
4184       }
4185       // Range check the immediate.
4186       // lsl, ror: 0 <= imm <= 31
4187       // lsr, asr: 0 <= imm <= 32
4188       Imm = CE->getValue();
4189       if (Imm < 0 ||
4190           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4191           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4192         Error(ImmLoc, "immediate shift value out of range");
4193         return -1;
4194       }
4195       // shift by zero is a nop. Always send it through as lsl.
4196       // ('as' compatibility)
4197       if (Imm == 0)
4198         ShiftTy = ARM_AM::lsl;
4199     } else if (Parser.getTok().is(AsmToken::Identifier)) {
4200       SMLoc L = Parser.getTok().getLoc();
4201       EndLoc = Parser.getTok().getEndLoc();
4202       ShiftReg = tryParseRegister();
4203       if (ShiftReg == -1) {
4204         Error(L, "expected immediate or register in shift operand");
4205         return -1;
4206       }
4207     } else {
4208       Error(Parser.getTok().getLoc(),
4209             "expected immediate or register in shift operand");
4210       return -1;
4211     }
4212   }
4213 
4214   if (ShiftReg && ShiftTy != ARM_AM::rrx)
4215     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4216                                                          ShiftReg, Imm,
4217                                                          S, EndLoc));
4218   else
4219     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4220                                                           S, EndLoc));
4221 
4222   return 0;
4223 }
4224 
4225 /// Try to parse a register name.  The token must be an Identifier when called.
4226 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
4227 /// if there is a "writeback". 'true' if it's not a register.
4228 ///
4229 /// TODO this is likely to change to allow different register types and or to
4230 /// parse for a specific register type.
4231 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4232   MCAsmParser &Parser = getParser();
4233   SMLoc RegStartLoc = Parser.getTok().getLoc();
4234   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4235   int RegNo = tryParseRegister();
4236   if (RegNo == -1)
4237     return true;
4238 
4239   Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4240 
4241   const AsmToken &ExclaimTok = Parser.getTok();
4242   if (ExclaimTok.is(AsmToken::Exclaim)) {
4243     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4244                                                ExclaimTok.getLoc()));
4245     Parser.Lex(); // Eat exclaim token
4246     return false;
4247   }
4248 
4249   // Also check for an index operand. This is only legal for vector registers,
4250   // but that'll get caught OK in operand matching, so we don't need to
4251   // explicitly filter everything else out here.
4252   if (Parser.getTok().is(AsmToken::LBrac)) {
4253     SMLoc SIdx = Parser.getTok().getLoc();
4254     Parser.Lex(); // Eat left bracket token.
4255 
4256     const MCExpr *ImmVal;
4257     if (getParser().parseExpression(ImmVal))
4258       return true;
4259     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4260     if (!MCE)
4261       return TokError("immediate value expected for vector index");
4262 
4263     if (Parser.getTok().isNot(AsmToken::RBrac))
4264       return Error(Parser.getTok().getLoc(), "']' expected");
4265 
4266     SMLoc E = Parser.getTok().getEndLoc();
4267     Parser.Lex(); // Eat right bracket token.
4268 
4269     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
4270                                                      SIdx, E,
4271                                                      getContext()));
4272   }
4273 
4274   return false;
4275 }
4276 
4277 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
4278 /// instruction with a symbolic operand name.
4279 /// We accept "crN" syntax for GAS compatibility.
4280 /// <operand-name> ::= <prefix><number>
4281 /// If CoprocOp is 'c', then:
4282 ///   <prefix> ::= c | cr
4283 /// If CoprocOp is 'p', then :
4284 ///   <prefix> ::= p
4285 /// <number> ::= integer in range [0, 15]
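/// For example, "c12" and "cr12" both yield 12 when CoprocOp is 'c', and
/// "p15" yields 15 when CoprocOp is 'p'.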
4286 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4287   // Use the same layout as the tablegen'erated register name matcher. Ugly,
4288   // but efficient.
4289   if (Name.size() < 2 || Name[0] != CoprocOp)
4290     return -1;
4291   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4292 
4293   switch (Name.size()) {
4294   default: return -1;
4295   case 1:
4296     switch (Name[0]) {
4297     default:  return -1;
4298     case '0': return 0;
4299     case '1': return 1;
4300     case '2': return 2;
4301     case '3': return 3;
4302     case '4': return 4;
4303     case '5': return 5;
4304     case '6': return 6;
4305     case '7': return 7;
4306     case '8': return 8;
4307     case '9': return 9;
4308     }
4309   case 2:
4310     if (Name[0] != '1')
4311       return -1;
4312     switch (Name[1]) {
4313     default:  return -1;
4314     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4315     // However, old cores (v5/v6) did use them in that way.
4316     case '0': return 10;
4317     case '1': return 11;
4318     case '2': return 12;
4319     case '3': return 13;
4320     case '4': return 14;
4321     case '5': return 15;
4322     }
4323   }
4324 }
4325 
4326 /// parseITCondCode - Try to parse a condition code for an IT instruction.
4327 ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4328   MCAsmParser &Parser = getParser();
4329   SMLoc S = Parser.getTok().getLoc();
4330   const AsmToken &Tok = Parser.getTok();
4331   if (!Tok.is(AsmToken::Identifier))
4332     return ParseStatus::NoMatch;
4333   unsigned CC = ARMCondCodeFromString(Tok.getString());
4334   if (CC == ~0U)
4335     return ParseStatus::NoMatch;
4336   Parser.Lex(); // Eat the token.
4337 
4338   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4339 
4340   return ParseStatus::Success;
4341 }
4342 
4343 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
4344 /// token must be an Identifier when called, and if it is a coprocessor
4345 /// number, the token is eaten and the operand is added to the operand list.
4346 ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4347   MCAsmParser &Parser = getParser();
4348   SMLoc S = Parser.getTok().getLoc();
4349   const AsmToken &Tok = Parser.getTok();
4350   if (Tok.isNot(AsmToken::Identifier))
4351     return ParseStatus::NoMatch;
4352 
4353   int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4354   if (Num == -1)
4355     return ParseStatus::NoMatch;
4356   if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4357     return ParseStatus::NoMatch;
4358 
4359   Parser.Lex(); // Eat identifier token.
4360   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4361   return ParseStatus::Success;
4362 }
4363 
4364 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
4365 /// token must be an Identifier when called, and if it is a coprocessor
4366 /// register, the token is eaten and the operand is added to the operand list.
4367 ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4368   MCAsmParser &Parser = getParser();
4369   SMLoc S = Parser.getTok().getLoc();
4370   const AsmToken &Tok = Parser.getTok();
4371   if (Tok.isNot(AsmToken::Identifier))
4372     return ParseStatus::NoMatch;
4373 
4374   int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4375   if (Reg == -1)
4376     return ParseStatus::NoMatch;
4377 
4378   Parser.Lex(); // Eat identifier token.
4379   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4380   return ParseStatus::Success;
4381 }
4382 
4383 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
4384 /// coproc_option : '{' imm0_255 '}'
4385 ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4386   MCAsmParser &Parser = getParser();
4387   SMLoc S = Parser.getTok().getLoc();
4388 
4389   // If this isn't a '{', this isn't a coprocessor immediate operand.
4390   if (Parser.getTok().isNot(AsmToken::LCurly))
4391     return ParseStatus::NoMatch;
4392   Parser.Lex(); // Eat the '{'
4393 
4394   const MCExpr *Expr;
4395   SMLoc Loc = Parser.getTok().getLoc();
4396   if (getParser().parseExpression(Expr))
4397     return Error(Loc, "illegal expression");
4398   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4399   if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4400     return Error(Loc,
4401                  "coprocessor option must be an immediate in range [0, 255]");
4402   int Val = CE->getValue();
4403 
4404   // Check for and consume the closing '}'
4405   if (Parser.getTok().isNot(AsmToken::RCurly))
4406     return ParseStatus::Failure;
4407   SMLoc E = Parser.getTok().getEndLoc();
4408   Parser.Lex(); // Eat the '}'
4409 
4410   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4411   return ParseStatus::Success;
4412 }
4413 
4414 // For register list parsing, we need to map from raw GPR register numbering
4415 // to the enumeration values. The enumeration values aren't sorted by
4416 // register number due to our using "sp", "lr" and "pc" as canonical names.
4417 static unsigned getNextRegister(unsigned Reg) {
4418   // If this is a GPR, we need to do it manually, otherwise we can rely
4419   // on the sort ordering of the enumeration since the other reg-classes
4420   // are sane.
4421   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4422     return Reg + 1;
4423   switch(Reg) {
4424   default: llvm_unreachable("Invalid GPR number!");
4425   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
4426   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
4427   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
4428   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
4429   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
4430   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4431   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
4432   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
4433   }
4434 }
4435 
4436 // Insert an <Encoding, Register> pair in an ordered vector. Return true on
4437 // success, or false if a duplicate encoding was found.
4438 static bool
4439 insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4440                    unsigned Enc, unsigned Reg) {
4441   Regs.emplace_back(Enc, Reg);
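  // Bubble the new entry towards the front so the vector stays sorted by
  // encoding; if an entry with the same encoding is already present, remove
  // the new one again and report the duplicate.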
4442   for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4443     if (J->first == Enc) {
4444       Regs.erase(J.base());
4445       return false;
4446     }
4447     if (J->first < Enc)
4448       break;
4449     std::swap(*I, *J);
4450   }
4451   return true;
4452 }
4453 
4454 /// Parse a register list.
4455 bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4456                                      bool AllowRAAC) {
4457   MCAsmParser &Parser = getParser();
4458   if (Parser.getTok().isNot(AsmToken::LCurly))
4459     return TokError("Token is not a Left Curly Brace");
4460   SMLoc S = Parser.getTok().getLoc();
4461   Parser.Lex(); // Eat '{' token.
4462   SMLoc RegLoc = Parser.getTok().getLoc();
4463 
4464   // Check the first register in the list to see what register class
4465   // this is a list of.
4466   int Reg = tryParseRegister();
4467   if (Reg == -1)
4468     return Error(RegLoc, "register expected");
4469   if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4470     return Error(RegLoc, "pseudo-register not allowed");
4471   // The reglist instructions have at most 16 registers, so reserve
4472   // space for that many.
4473   int EReg = 0;
4474   SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
4475 
4476   // Allow Q regs and just interpret them as the two D sub-registers.
4477   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4478     Reg = getDRegFromQReg(Reg);
4479     EReg = MRI->getEncodingValue(Reg);
4480     Registers.emplace_back(EReg, Reg);
4481     ++Reg;
4482   }
4483   const MCRegisterClass *RC;
4484   if (Reg == ARM::RA_AUTH_CODE ||
4485       ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4486     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4487   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4488     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4489   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4490     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4491   else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4492     RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4493   else
4494     return Error(RegLoc, "invalid register in register list");
4495 
4496   // Store the register.
4497   EReg = MRI->getEncodingValue(Reg);
4498   Registers.emplace_back(EReg, Reg);
4499 
4500   // This starts immediately after the first register token in the list,
4501   // so we can see either a comma or a minus (range separator) as a legal
4502   // next token.
4503   while (Parser.getTok().is(AsmToken::Comma) ||
4504          Parser.getTok().is(AsmToken::Minus)) {
4505     if (Parser.getTok().is(AsmToken::Minus)) {
4506       if (Reg == ARM::RA_AUTH_CODE)
4507         return Error(RegLoc, "pseudo-register not allowed");
4508       Parser.Lex(); // Eat the minus.
4509       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4510       int EndReg = tryParseRegister();
4511       if (EndReg == -1)
4512         return Error(AfterMinusLoc, "register expected");
4513       if (EndReg == ARM::RA_AUTH_CODE)
4514         return Error(AfterMinusLoc, "pseudo-register not allowed");
4515       // Allow Q regs and just interpret them as the two D sub-registers.
4516       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4517         EndReg = getDRegFromQReg(EndReg) + 1;
4518       // If the register is the same as the start reg, there's nothing
4519       // more to do.
4520       if (Reg == EndReg)
4521         continue;
4522       // The register must be in the same register class as the first.
4523       if (!RC->contains(Reg))
4524         return Error(AfterMinusLoc, "invalid register in register list");
4525       // Ranges must go from low to high.
4526       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4527         return Error(AfterMinusLoc, "bad range in register list");
4528 
4529       // Add all the registers in the range to the register list.
4530       while (Reg != EndReg) {
4531         Reg = getNextRegister(Reg);
4532         EReg = MRI->getEncodingValue(Reg);
4533         if (!insertNoDuplicates(Registers, EReg, Reg)) {
4534           Warning(AfterMinusLoc, StringRef("duplicated register (") +
4535                                      ARMInstPrinter::getRegisterName(Reg) +
4536                                      ") in register list");
4537         }
4538       }
4539       continue;
4540     }
4541     Parser.Lex(); // Eat the comma.
4542     RegLoc = Parser.getTok().getLoc();
4543     int OldReg = Reg;
4544     const AsmToken RegTok = Parser.getTok();
4545     Reg = tryParseRegister();
4546     if (Reg == -1)
4547       return Error(RegLoc, "register expected");
4548     if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4549       return Error(RegLoc, "pseudo-register not allowed");
4550     // Allow Q regs and just interpret them as the two D sub-registers.
4551     bool isQReg = false;
4552     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4553       Reg = getDRegFromQReg(Reg);
4554       isQReg = true;
4555     }
4556     if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4557         RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4558         ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4559       // Switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4560       // subset of GPRRegClassID that additionally contains APSR.
4561       RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4562     }
4563     if (Reg == ARM::VPR &&
4564         (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4565          RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4566          RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4567       RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4568       EReg = MRI->getEncodingValue(Reg);
4569       if (!insertNoDuplicates(Registers, EReg, Reg)) {
4570         Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4571                             ") in register list");
4572       }
4573       continue;
4574     }
4575     // The register must be in the same register class as the first.
4576     if ((Reg == ARM::RA_AUTH_CODE &&
4577          RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4578         (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4579       return Error(RegLoc, "invalid register in register list");
4580     // In most cases, the list must be monotonically increasing. An
4581     // exception is CLRM, which is order-independent anyway, so
4582     // there's no potential for confusion if you write clrm {r2,r1}
4583     // instead of clrm {r1,r2}.
4584     if (EnforceOrder &&
4585         MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4586       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4587         Warning(RegLoc, "register list not in ascending order");
4588       else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4589         return Error(RegLoc, "register list not in ascending order");
4590     }
4591     // VFP register lists must also be contiguous.
4592     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4593         RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4594         Reg != OldReg + 1)
4595       return Error(RegLoc, "non-contiguous register range");
4596     EReg = MRI->getEncodingValue(Reg);
4597     if (!insertNoDuplicates(Registers, EReg, Reg)) {
4598       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4599                           ") in register list");
4600     }
4601     if (isQReg) {
4602       EReg = MRI->getEncodingValue(++Reg);
4603       Registers.emplace_back(EReg, Reg);
4604     }
4605   }
4606 
4607   if (Parser.getTok().isNot(AsmToken::RCurly))
4608     return Error(Parser.getTok().getLoc(), "'}' expected");
4609   SMLoc E = Parser.getTok().getEndLoc();
4610   Parser.Lex(); // Eat '}' token.
4611 
4612   // Push the register list operand.
4613   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4614 
4615   // The ARM system instruction variants for LDM/STM have a '^' token here.
4616   if (Parser.getTok().is(AsmToken::Caret)) {
4617     Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4618     Parser.Lex(); // Eat '^' token.
4619   }
4620 
4621   return false;
4622 }
4623 
4624 // Helper function to parse the lane index for vector lists.
4625 ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4626                                           unsigned &Index, SMLoc &EndLoc) {
4627   MCAsmParser &Parser = getParser();
4628   Index = 0; // Always return a defined index value.
4629   if (Parser.getTok().is(AsmToken::LBrac)) {
4630     Parser.Lex(); // Eat the '['.
4631     if (Parser.getTok().is(AsmToken::RBrac)) {
4632       // "Dn[]" is the 'all lanes' syntax.
4633       LaneKind = AllLanes;
4634       EndLoc = Parser.getTok().getEndLoc();
4635       Parser.Lex(); // Eat the ']'.
4636       return ParseStatus::Success;
4637     }
4638 
4639     // There's an optional '#' token here. Normally there wouldn't be, but
4640     // inline assembly puts one in, and it's friendly to accept that.
4641     if (Parser.getTok().is(AsmToken::Hash))
4642       Parser.Lex(); // Eat the '#'.
4643 
4644     const MCExpr *LaneIndex;
4645     SMLoc Loc = Parser.getTok().getLoc();
4646     if (getParser().parseExpression(LaneIndex))
4647       return Error(Loc, "illegal expression");
4648     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4649     if (!CE)
4650       return Error(Loc, "lane index must be empty or an integer");
4651     if (Parser.getTok().isNot(AsmToken::RBrac))
4652       return Error(Parser.getTok().getLoc(), "']' expected");
4653     EndLoc = Parser.getTok().getEndLoc();
4654     Parser.Lex(); // Eat the ']'.
4655     int64_t Val = CE->getValue();
4656 
4657     // FIXME: Make this range check context sensitive for .8, .16, .32.
4658     if (Val < 0 || Val > 7)
4659       return Error(Parser.getTok().getLoc(), "lane index out of range");
4660     Index = Val;
4661     LaneKind = IndexedLane;
4662     return ParseStatus::Success;
4663   }
4664   LaneKind = NoLanes;
4665   return ParseStatus::Success;
4666 }
4667 
4668 // parse a vector register list
4669 ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
4670   MCAsmParser &Parser = getParser();
4671   VectorLaneTy LaneKind;
4672   unsigned LaneIndex;
4673   SMLoc S = Parser.getTok().getLoc();
4674   // As an extension (to match gas), support a plain D register or Q register
4675   // (without enclosing curly braces) as a single or double entry list,
4676   // respectively.
4677   if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4678     SMLoc E = Parser.getTok().getEndLoc();
4679     int Reg = tryParseRegister();
4680     if (Reg == -1)
4681       return ParseStatus::NoMatch;
4682     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4683       ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4684       if (!Res.isSuccess())
4685         return Res;
4686       switch (LaneKind) {
4687       case NoLanes:
4688         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4689         break;
4690       case AllLanes:
4691         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4692                                                                 S, E));
4693         break;
4694       case IndexedLane:
4695         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4696                                                                LaneIndex,
4697                                                                false, S, E));
4698         break;
4699       }
4700       return ParseStatus::Success;
4701     }
4702     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4703       Reg = getDRegFromQReg(Reg);
4704       ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4705       if (!Res.isSuccess())
4706         return Res;
4707       switch (LaneKind) {
4708       case NoLanes:
4709         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4710                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4711         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4712         break;
4713       case AllLanes:
4714         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4715                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4716         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4717                                                                 S, E));
4718         break;
4719       case IndexedLane:
4720         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4721                                                                LaneIndex,
4722                                                                false, S, E));
4723         break;
4724       }
4725       return ParseStatus::Success;
4726     }
4727     return Error(S, "vector register expected");
4728   }
4729 
4730   if (Parser.getTok().isNot(AsmToken::LCurly))
4731     return ParseStatus::NoMatch;
4732 
4733   Parser.Lex(); // Eat '{' token.
4734   SMLoc RegLoc = Parser.getTok().getLoc();
4735 
4736   int Reg = tryParseRegister();
4737   if (Reg == -1)
4738     return Error(RegLoc, "register expected");
4739   unsigned Count = 1;
4740   int Spacing = 0;
4741   unsigned FirstReg = Reg;
4742 
4743   if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4744     return Error(Parser.getTok().getLoc(),
4745                  "vector register in range Q0-Q7 expected");
4746   // The list is of D registers, but we also allow Q regs and just interpret
4747   // them as the two D sub-registers.
4748   else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4749     FirstReg = Reg = getDRegFromQReg(Reg);
4750     Spacing = 1; // double-spacing requires explicit D registers, otherwise
4751                  // it's ambiguous with four-register single spaced.
4752     ++Reg;
4753     ++Count;
4754   }
4755 
4756   SMLoc E;
4757   if (!parseVectorLane(LaneKind, LaneIndex, E).isSuccess())
4758     return ParseStatus::Failure;
4759 
4760   while (Parser.getTok().is(AsmToken::Comma) ||
4761          Parser.getTok().is(AsmToken::Minus)) {
4762     if (Parser.getTok().is(AsmToken::Minus)) {
4763       if (!Spacing)
4764         Spacing = 1; // Register range implies a single spaced list.
4765       else if (Spacing == 2)
4766         return Error(Parser.getTok().getLoc(),
4767                      "sequential registers in double spaced list");
4768       Parser.Lex(); // Eat the minus.
4769       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4770       int EndReg = tryParseRegister();
4771       if (EndReg == -1)
4772         return Error(AfterMinusLoc, "register expected");
4773       // Allow Q regs and just interpret them as the two D sub-registers.
4774       if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4775         EndReg = getDRegFromQReg(EndReg) + 1;
4776       // If the register is the same as the start reg, there's nothing
4777       // more to do.
4778       if (Reg == EndReg)
4779         continue;
4780       // The register must be in the same register class as the first.
4781       if ((hasMVE() &&
4782            !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4783           (!hasMVE() &&
4784            !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)))
4785         return Error(AfterMinusLoc, "invalid register in register list");
4786       // Ranges must go from low to high.
4787       if (Reg > EndReg)
4788         return Error(AfterMinusLoc, "bad range in register list");
4789       // Parse the lane specifier if present.
4790       VectorLaneTy NextLaneKind;
4791       unsigned NextLaneIndex;
4792       if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4793         return ParseStatus::Failure;
4794       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4795         return Error(AfterMinusLoc, "mismatched lane index in register list");
4796 
4797       // Add all the registers in the range to the register list.
4798       Count += EndReg - Reg;
4799       Reg = EndReg;
4800       continue;
4801     }
4802     Parser.Lex(); // Eat the comma.
4803     RegLoc = Parser.getTok().getLoc();
4804     int OldReg = Reg;
4805     Reg = tryParseRegister();
4806     if (Reg == -1)
4807       return Error(RegLoc, "register expected");
4808 
4809     if (hasMVE()) {
4810       if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4811         return Error(RegLoc, "vector register in range Q0-Q7 expected");
4812       Spacing = 1;
4813     }
4814     // Vector register lists must be contiguous.
4815     // It's OK to use the enumeration values directly here, as the
4816     // VFP register classes have the enum sorted properly.
4817     //
4818     // The list is of D registers, but we also allow Q regs and just interpret
4819     // them as the two D sub-registers.
4820     else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4821       if (!Spacing)
4822         Spacing = 1; // Register range implies a single spaced list.
4823       else if (Spacing == 2)
4824         return Error(
4825             RegLoc,
4826             "invalid register in double-spaced list (must be 'D' register')");
4827       Reg = getDRegFromQReg(Reg);
4828       if (Reg != OldReg + 1)
4829         return Error(RegLoc, "non-contiguous register range");
4830       ++Reg;
4831       Count += 2;
4832       // Parse the lane specifier if present.
4833       VectorLaneTy NextLaneKind;
4834       unsigned NextLaneIndex;
4835       SMLoc LaneLoc = Parser.getTok().getLoc();
4836       if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4837         return ParseStatus::Failure;
4838       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4839         return Error(LaneLoc, "mismatched lane index in register list");
4840       continue;
4841     }
4842     // Normal D register.
4843     // Figure out the register spacing (single or double) of the list if
4844     // we don't know it already.
4845     if (!Spacing)
4846       Spacing = 1 + (Reg == OldReg + 2);
4847 
4848     // Just check that it's contiguous and keep going.
4849     if (Reg != OldReg + Spacing)
4850       return Error(RegLoc, "non-contiguous register range");
4851     ++Count;
4852     // Parse the lane specifier if present.
4853     VectorLaneTy NextLaneKind;
4854     unsigned NextLaneIndex;
4855     SMLoc EndLoc = Parser.getTok().getLoc();
4856     if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4857       return ParseStatus::Failure;
4858     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4859       return Error(EndLoc, "mismatched lane index in register list");
4860   }
4861 
4862   if (Parser.getTok().isNot(AsmToken::RCurly))
4863     return Error(Parser.getTok().getLoc(), "'}' expected");
4864   E = Parser.getTok().getEndLoc();
4865   Parser.Lex(); // Eat '}' token.
4866 
4867   switch (LaneKind) {
4868   case NoLanes:
4869   case AllLanes: {
4870     // Two-register operands have been converted to the
4871     // composite register classes.
4872     if (Count == 2 && !hasMVE()) {
4873       const MCRegisterClass *RC = (Spacing == 1) ?
4874         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4875         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4876       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4877     }
4878     auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4879                    ARMOperand::CreateVectorListAllLanes);
4880     Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
4881     break;
4882   }
4883   case IndexedLane:
4884     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4885                                                            LaneIndex,
4886                                                            (Spacing == 2),
4887                                                            S, E));
4888     break;
4889   }
4890   return ParseStatus::Success;
4891 }
4892 
4893 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4894 ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4895   MCAsmParser &Parser = getParser();
4896   SMLoc S = Parser.getTok().getLoc();
4897   const AsmToken &Tok = Parser.getTok();
4898   unsigned Opt;
4899 
4900   if (Tok.is(AsmToken::Identifier)) {
4901     StringRef OptStr = Tok.getString();
4902 
4903     Opt = StringSwitch<unsigned>(OptStr.lower())
4904       .Case("sy",    ARM_MB::SY)
4905       .Case("st",    ARM_MB::ST)
4906       .Case("ld",    ARM_MB::LD)
4907       .Case("sh",    ARM_MB::ISH)
4908       .Case("ish",   ARM_MB::ISH)
4909       .Case("shst",  ARM_MB::ISHST)
4910       .Case("ishst", ARM_MB::ISHST)
4911       .Case("ishld", ARM_MB::ISHLD)
4912       .Case("nsh",   ARM_MB::NSH)
4913       .Case("un",    ARM_MB::NSH)
4914       .Case("nshst", ARM_MB::NSHST)
4915       .Case("nshld", ARM_MB::NSHLD)
4916       .Case("unst",  ARM_MB::NSHST)
4917       .Case("osh",   ARM_MB::OSH)
4918       .Case("oshst", ARM_MB::OSHST)
4919       .Case("oshld", ARM_MB::OSHLD)
4920       .Default(~0U);
4921 
4922     // ishld, oshld, nshld and ld are only available from ARMv8.
4923     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4924                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4925       Opt = ~0U;
4926 
4927     if (Opt == ~0U)
4928       return ParseStatus::NoMatch;
4929 
4930     Parser.Lex(); // Eat identifier token.
4931   } else if (Tok.is(AsmToken::Hash) ||
4932              Tok.is(AsmToken::Dollar) ||
4933              Tok.is(AsmToken::Integer)) {
4934     if (Parser.getTok().isNot(AsmToken::Integer))
4935       Parser.Lex(); // Eat '#' or '$'.
4936     SMLoc Loc = Parser.getTok().getLoc();
4937 
4938     const MCExpr *MemBarrierID;
4939     if (getParser().parseExpression(MemBarrierID))
4940       return Error(Loc, "illegal expression");
4941 
4942     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4943     if (!CE)
4944       return Error(Loc, "constant expression expected");
4945 
4946     int Val = CE->getValue();
4947     if (Val & ~0xf)
4948       return Error(Loc, "immediate value out of range");
4949 
4950     Opt = ARM_MB::RESERVED_0 + Val;
4951   } else
4952     return ParseStatus::Failure;
4953 
4954   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4955   return ParseStatus::Success;
4956 }
4957 
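/// parseTraceSyncBarrierOptOperand - Try to parse the TSB trace sync barrier
/// option; the only accepted value is "csync".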
4958 ParseStatus
4959 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4960   MCAsmParser &Parser = getParser();
4961   SMLoc S = Parser.getTok().getLoc();
4962   const AsmToken &Tok = Parser.getTok();
4963 
4964   if (Tok.isNot(AsmToken::Identifier))
4965     return ParseStatus::NoMatch;
4966 
4967   if (!Tok.getString().equals_insensitive("csync"))
4968     return ParseStatus::NoMatch;
4969 
4970   Parser.Lex(); // Eat identifier token.
4971 
4972   Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4973   return ParseStatus::Success;
4974 }
4975 
4976 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4977 ParseStatus
4978 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4979   MCAsmParser &Parser = getParser();
4980   SMLoc S = Parser.getTok().getLoc();
4981   const AsmToken &Tok = Parser.getTok();
4982   unsigned Opt;
4983 
4984   if (Tok.is(AsmToken::Identifier)) {
4985     StringRef OptStr = Tok.getString();
4986 
4987     if (OptStr.equals_insensitive("sy"))
4988       Opt = ARM_ISB::SY;
4989     else
4990       return ParseStatus::NoMatch;
4991 
4992     Parser.Lex(); // Eat identifier token.
4993   } else if (Tok.is(AsmToken::Hash) ||
4994              Tok.is(AsmToken::Dollar) ||
4995              Tok.is(AsmToken::Integer)) {
4996     if (Parser.getTok().isNot(AsmToken::Integer))
4997       Parser.Lex(); // Eat '#' or '$'.
4998     SMLoc Loc = Parser.getTok().getLoc();
4999 
5000     const MCExpr *ISBarrierID;
5001     if (getParser().parseExpression(ISBarrierID))
5002       return Error(Loc, "illegal expression");
5003 
5004     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5005     if (!CE)
5006       return Error(Loc, "constant expression expected");
5007 
5008     int Val = CE->getValue();
5009     if (Val & ~0xf)
5010       return Error(Loc, "immediate value out of range");
5011 
5012     Opt = ARM_ISB::RESERVED_0 + Val;
5013   } else
5014     return ParseStatus::Failure;
5015 
5016   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5017           (ARM_ISB::InstSyncBOpt)Opt, S));
5018   return ParseStatus::Success;
5019 }
5020 
5021 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
5022 ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5023   MCAsmParser &Parser = getParser();
5024   SMLoc S = Parser.getTok().getLoc();
5025   const AsmToken &Tok = Parser.getTok();
5026   if (!Tok.is(AsmToken::Identifier))
5027     return ParseStatus::NoMatch;
5028   StringRef IFlagsStr = Tok.getString();
5029 
5030   // An iflags string of "none" is interpreted to mean that none of the AIF
5031   // bits are set.  Not a terribly useful instruction, but a valid encoding.
5032   unsigned IFlags = 0;
5033   if (IFlagsStr != "none") {
5034     for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5035       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5036         .Case("a", ARM_PROC::A)
5037         .Case("i", ARM_PROC::I)
5038         .Case("f", ARM_PROC::F)
5039         .Default(~0U);
5040 
5041       // If some specific iflag is already set, it means that some letter is
5042       // present more than once; this is not acceptable.
5043       if (Flag == ~0U || (IFlags & Flag))
5044         return ParseStatus::NoMatch;
5045 
5046       IFlags |= Flag;
5047     }
5048   }
5049 
5050   Parser.Lex(); // Eat identifier token.
5051   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
5052   return ParseStatus::Success;
5053 }
5054 
5055 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
5056 ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
5057   MCAsmParser &Parser = getParser();
5058   SMLoc S = Parser.getTok().getLoc();
5059   const AsmToken &Tok = Parser.getTok();
5060 
5061   if (Tok.is(AsmToken::Integer)) {
5062     int64_t Val = Tok.getIntVal();
5063     if (Val > 255 || Val < 0) {
5064       return ParseStatus::NoMatch;
5065     }
5066     unsigned SYSmvalue = Val & 0xFF;
5067     Parser.Lex();
5068     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5069     return ParseStatus::Success;
5070   }
5071 
5072   if (!Tok.is(AsmToken::Identifier))
5073     return ParseStatus::NoMatch;
5074   StringRef Mask = Tok.getString();
5075 
5076   if (isMClass()) {
5077     auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
5078     if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5079       return ParseStatus::NoMatch;
5080 
5081     unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5082 
5083     Parser.Lex(); // Eat identifier token.
5084     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5085     return ParseStatus::Success;
5086   }
5087 
5088   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
5089   size_t Start = 0, Next = Mask.find('_');
5090   StringRef Flags = "";
5091   std::string SpecReg = Mask.slice(Start, Next).lower();
5092   if (Next != StringRef::npos)
5093     Flags = Mask.slice(Next+1, Mask.size());
5094 
5095   // FlagsVal contains the complete mask:
5096   // 3-0: Mask
5097   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5098   unsigned FlagsVal = 0;
5099 
5100   if (SpecReg == "apsr") {
5101     FlagsVal = StringSwitch<unsigned>(Flags)
5102     .Case("nzcvq",  0x8) // same as CPSR_f
5103     .Case("g",      0x4) // same as CPSR_s
5104     .Case("nzcvqg", 0xc) // same as CPSR_fs
5105     .Default(~0U);
5106 
5107     if (FlagsVal == ~0U) {
5108       if (!Flags.empty())
5109         return ParseStatus::NoMatch;
5110       else
5111         FlagsVal = 8; // No flag
5112     }
5113   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
5114     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
5115     if (Flags == "all" || Flags == "")
5116       Flags = "fc";
5117     for (int i = 0, e = Flags.size(); i != e; ++i) {
5118       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
5119       .Case("c", 1)
5120       .Case("x", 2)
5121       .Case("s", 4)
5122       .Case("f", 8)
5123       .Default(~0U);
5124 
5125       // If some specific flag is already set, it means that some letter is
5126       // present more than once; this is not acceptable.
5127       if (Flag == ~0U || (FlagsVal & Flag))
5128         return ParseStatus::NoMatch;
5129       FlagsVal |= Flag;
5130     }
5131   } else // No match for special register.
5132     return ParseStatus::NoMatch;
5133 
5134   // Special register without flags is NOT equivalent to "fc" flags.
5135   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
5136   // two lines would enable gas compatibility at the expense of breaking
5137   // round-tripping.
5138   //
5139   // if (!FlagsVal)
5140   //  FlagsVal = 0x9;
5141 
5142   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5143   if (SpecReg == "spsr")
5144     FlagsVal |= 16;
5145 
5146   Parser.Lex(); // Eat identifier token.
5147   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
5148   return ParseStatus::Success;
5149 }
5150 
5151 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5152 /// use in the MRS/MSR instructions added to support virtualization.
5153 ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5154   MCAsmParser &Parser = getParser();
5155   SMLoc S = Parser.getTok().getLoc();
5156   const AsmToken &Tok = Parser.getTok();
5157   if (!Tok.is(AsmToken::Identifier))
5158     return ParseStatus::NoMatch;
5159   StringRef RegName = Tok.getString();
5160 
5161   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5162   if (!TheReg)
5163     return ParseStatus::NoMatch;
5164   unsigned Encoding = TheReg->Encoding;
5165 
5166   Parser.Lex(); // Eat identifier token.
5167   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5168   return ParseStatus::Success;
5169 }
5170 
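/// parsePKHImm - Parse the shift specifier of a PKHBT/PKHTB instruction: the
/// shift operator named by \p Op ('lsl' or 'asr'), a '#', and an immediate
/// constant in the inclusive range [Low, High].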
5171 ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op,
5172                                       int Low, int High) {
5173   MCAsmParser &Parser = getParser();
5174   const AsmToken &Tok = Parser.getTok();
5175   if (Tok.isNot(AsmToken::Identifier))
5176     return Error(Parser.getTok().getLoc(), Op + " operand expected.");
5177   StringRef ShiftName = Tok.getString();
5178   std::string LowerOp = Op.lower();
5179   std::string UpperOp = Op.upper();
5180   if (ShiftName != LowerOp && ShiftName != UpperOp)
5181     return Error(Parser.getTok().getLoc(), Op + " operand expected.");
5182   Parser.Lex(); // Eat shift type token.
5183 
5184   // There must be a '#' and a shift amount.
5185   if (Parser.getTok().isNot(AsmToken::Hash) &&
5186       Parser.getTok().isNot(AsmToken::Dollar))
5187     return Error(Parser.getTok().getLoc(), "'#' expected");
5188   Parser.Lex(); // Eat hash token.
5189 
5190   const MCExpr *ShiftAmount;
5191   SMLoc Loc = Parser.getTok().getLoc();
5192   SMLoc EndLoc;
5193   if (getParser().parseExpression(ShiftAmount, EndLoc))
5194     return Error(Loc, "illegal expression");
5195   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5196   if (!CE)
5197     return Error(Loc, "constant expression expected");
5198   int Val = CE->getValue();
5199   if (Val < Low || Val > High)
5200     return Error(Loc, "immediate value out of range");
5201 
5202   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5203 
5204   return ParseStatus::Success;
5205 }
5206 
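/// parseSetEndImm - Parse the endianness operand of a SETEND instruction:
/// "be" (encoded as 1) or "le" (encoded as 0).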
5207 ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5208   MCAsmParser &Parser = getParser();
5209   const AsmToken &Tok = Parser.getTok();
5210   SMLoc S = Tok.getLoc();
5211   if (Tok.isNot(AsmToken::Identifier))
5212     return Error(S, "'be' or 'le' operand expected");
5213   int Val = StringSwitch<int>(Tok.getString().lower())
5214     .Case("be", 1)
5215     .Case("le", 0)
5216     .Default(-1);
5217   Parser.Lex(); // Eat the token.
5218 
5219   if (Val == -1)
5220     return Error(S, "'be' or 'le' operand expected");
5221   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
5222                                                                   getContext()),
5223                                            S, Tok.getEndLoc()));
5224   return ParseStatus::Success;
5225 }
5226 
5227 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5228 /// instructions. Legal values are:
5229 ///     lsl #n  'n' in [0,31]
5230 ///     asr #n  'n' in [1,32]
5231 ///             n == 32 encoded as n == 0.
5232 ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5233   MCAsmParser &Parser = getParser();
5234   const AsmToken &Tok = Parser.getTok();
5235   SMLoc S = Tok.getLoc();
5236   if (Tok.isNot(AsmToken::Identifier))
5237     return Error(S, "shift operator 'asr' or 'lsl' expected");
5238   StringRef ShiftName = Tok.getString();
5239   bool isASR;
5240   if (ShiftName == "lsl" || ShiftName == "LSL")
5241     isASR = false;
5242   else if (ShiftName == "asr" || ShiftName == "ASR")
5243     isASR = true;
5244   else
5245     return Error(S, "shift operator 'asr' or 'lsl' expected");
5246   Parser.Lex(); // Eat the operator.
5247 
5248   // A '#' and a shift amount.
5249   if (Parser.getTok().isNot(AsmToken::Hash) &&
5250       Parser.getTok().isNot(AsmToken::Dollar))
5251     return Error(Parser.getTok().getLoc(), "'#' expected");
5252   Parser.Lex(); // Eat hash token.
5253   SMLoc ExLoc = Parser.getTok().getLoc();
5254 
5255   const MCExpr *ShiftAmount;
5256   SMLoc EndLoc;
5257   if (getParser().parseExpression(ShiftAmount, EndLoc))
5258     return Error(ExLoc, "malformed shift expression");
5259   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5260   if (!CE)
5261     return Error(ExLoc, "shift amount must be an immediate");
5262 
5263   int64_t Val = CE->getValue();
5264   if (isASR) {
5265     // Shift amount must be in [1,32]
5266     if (Val < 1 || Val > 32)
5267       return Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5268     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5269     if (isThumb() && Val == 32)
5270       return Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5271     if (Val == 32) Val = 0;
5272   } else {
5273     // Shift amount must be in [0,31]
5274     if (Val < 0 || Val > 31)
5275       return Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
5276   }
5277 
5278   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5279 
5280   return ParseStatus::Success;
5281 }
5282 
5283 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5284 /// of instructions. Legal values are:
5285 ///     ror #n  'n' in {0, 8, 16, 24}
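///     For example: "sxtb r0, r1, ror #8" or "uxth r2, r3, ror #16".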
5286 ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
5287   MCAsmParser &Parser = getParser();
5288   const AsmToken &Tok = Parser.getTok();
5289   SMLoc S = Tok.getLoc();
5290   if (Tok.isNot(AsmToken::Identifier))
5291     return ParseStatus::NoMatch;
5292   StringRef ShiftName = Tok.getString();
5293   if (ShiftName != "ror" && ShiftName != "ROR")
5294     return ParseStatus::NoMatch;
5295   Parser.Lex(); // Eat the operator.
5296 
5297   // A '#' and a rotate amount.
5298   if (Parser.getTok().isNot(AsmToken::Hash) &&
5299       Parser.getTok().isNot(AsmToken::Dollar))
5300     return Error(Parser.getTok().getLoc(), "'#' expected");
5301   Parser.Lex(); // Eat hash token.
5302   SMLoc ExLoc = Parser.getTok().getLoc();
5303 
5304   const MCExpr *ShiftAmount;
5305   SMLoc EndLoc;
5306   if (getParser().parseExpression(ShiftAmount, EndLoc))
5307     return Error(ExLoc, "malformed rotate expression");
5308   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5309   if (!CE)
5310     return Error(ExLoc, "rotate amount must be an immediate");
5311 
5312   int64_t Val = CE->getValue();
5313   // Rotate amount must be in {0, 8, 16, 24}; 0 is an undocumented extension.
5314   // Normally, zero is represented in asm by omitting the rotate operand
5315   // entirely.
5316   if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5317     return Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5318 
5319   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5320 
5321   return ParseStatus::Success;
5322 }
5323 
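/// parseModImm - Parse an ARM modified-immediate (so_imm) operand, either as a
/// single encodable constant (e.g. "mov r0, #0xff00") or as an explicit
/// "#<bits>, #<rot>" pair with bits in [0, 255] and an even rotation in [0, 30].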
5324 ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
5325   MCAsmParser &Parser = getParser();
5326   MCAsmLexer &Lexer = getLexer();
5327   int64_t Imm1, Imm2;
5328 
5329   SMLoc S = Parser.getTok().getLoc();
5330 
5331   // 1) A mod_imm operand can appear in the place of a register name:
5332   //   add r0, #mod_imm
5333   //   add r0, r0, #mod_imm
5334   // to correctly handle the latter, we bail out as soon as we see an
5335   // identifier.
5336   //
5337   // 2) Similarly, we do not want to parse into complex operands:
5338   //   mov r0, #mod_imm
5339   //   mov r0, :lower16:(_foo)
5340   if (Parser.getTok().is(AsmToken::Identifier) ||
5341       Parser.getTok().is(AsmToken::Colon))
5342     return ParseStatus::NoMatch;
5343 
5344   // Hash (dollar) is optional as per the ARMARM
5345   if (Parser.getTok().is(AsmToken::Hash) ||
5346       Parser.getTok().is(AsmToken::Dollar)) {
5347     // Avoid parsing into complex operands (#:)
5348     if (Lexer.peekTok().is(AsmToken::Colon))
5349       return ParseStatus::NoMatch;
5350 
5351     // Eat the hash (dollar)
5352     Parser.Lex();
5353   }
5354 
5355   SMLoc Sx1, Ex1;
5356   Sx1 = Parser.getTok().getLoc();
5357   const MCExpr *Imm1Exp;
5358   if (getParser().parseExpression(Imm1Exp, Ex1))
5359     return Error(Sx1, "malformed expression");
5360 
5361   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5362 
5363   if (CE) {
5364     // Immediate must fit within 32 bits
5365     Imm1 = CE->getValue();
5366     int Enc = ARM_AM::getSOImmVal(Imm1);
5367     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5368       // We have a match!
5369       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5370                                                   (Enc & 0xF00) >> 7,
5371                                                   Sx1, Ex1));
5372       return ParseStatus::Success;
5373     }
5374 
5375     // We have parsed an immediate which is not for us; fall back to a plain
5376     // immediate. This can happen for instruction aliases. For example,
5377     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5378     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5379     // instruction with a mod_imm operand. The alias is defined such that the
5380     // parser method is shared, that's why we have to do this here.
5381     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5382       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5383       return ParseStatus::Success;
5384     }
5385   } else {
5386     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5387     // MCFixup). Fall back to a plain immediate.
5388     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5389     return ParseStatus::Success;
5390   }
5391 
5392   // From this point onward, we expect the input to be a (#bits, #rot) pair
5393   if (Parser.getTok().isNot(AsmToken::Comma))
5394     return Error(Sx1,
5395                  "expected modified immediate operand: #[0, 255], #even[0-30]");
5396 
5397   if (Imm1 & ~0xFF)
5398     return Error(Sx1, "immediate operand must be a number in the range [0, 255]");
5399 
5400   // Eat the comma
5401   Parser.Lex();
5402 
5403   // Repeat for #rot
5404   SMLoc Sx2, Ex2;
5405   Sx2 = Parser.getTok().getLoc();
5406 
5407   // Eat the optional hash (dollar)
5408   if (Parser.getTok().is(AsmToken::Hash) ||
5409       Parser.getTok().is(AsmToken::Dollar))
5410     Parser.Lex();
5411 
5412   const MCExpr *Imm2Exp;
5413   if (getParser().parseExpression(Imm2Exp, Ex2))
5414     return Error(Sx2, "malformed expression");
5415 
5416   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5417 
5418   if (CE) {
5419     Imm2 = CE->getValue();
5420     if (!(Imm2 & ~0x1E)) {
5421       // We have a match!
5422       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5423       return ParseStatus::Success;
5424     }
5425     return Error(Sx2,
5426                  "immediate operand must be an even number in the range [0, 30]");
5427   } else {
5428     return Error(Sx2, "constant expression expected");
5429   }
5430 }
5431 
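/// parseBitfield - Parse the "#<lsb>, #<width>" operand pair of the BFC/BFI
/// bitfield instructions, e.g. "bfi r0, r1, #4, #8"; lsb must be in [0,31]
/// and width in [1,32-lsb].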
5432 ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
5433   MCAsmParser &Parser = getParser();
5434   SMLoc S = Parser.getTok().getLoc();
5435   // The bitfield descriptor is really two operands, the LSB and the width.
5436   if (Parser.getTok().isNot(AsmToken::Hash) &&
5437       Parser.getTok().isNot(AsmToken::Dollar))
5438     return Error(Parser.getTok().getLoc(), "'#' expected");
5439   Parser.Lex(); // Eat hash token.
5440 
5441   const MCExpr *LSBExpr;
5442   SMLoc E = Parser.getTok().getLoc();
5443   if (getParser().parseExpression(LSBExpr))
5444     return Error(E, "malformed immediate expression");
5445   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
5446   if (!CE)
5447     return Error(E, "'lsb' operand must be an immediate");
5448 
5449   int64_t LSB = CE->getValue();
5450   // The LSB must be in the range [0,31]
5451   if (LSB < 0 || LSB > 31)
5452     return Error(E, "'lsb' operand must be in the range [0,31]");
5453   E = Parser.getTok().getLoc();
5454 
5455   // Expect another immediate operand.
5456   if (Parser.getTok().isNot(AsmToken::Comma))
5457     return Error(Parser.getTok().getLoc(), "too few operands");
5458   Parser.Lex(); // Eat comma token.
5459   if (Parser.getTok().isNot(AsmToken::Hash) &&
5460       Parser.getTok().isNot(AsmToken::Dollar))
5461     return Error(Parser.getTok().getLoc(), "'#' expected");
5462   Parser.Lex(); // Eat hash token.
5463 
5464   const MCExpr *WidthExpr;
5465   SMLoc EndLoc;
5466   if (getParser().parseExpression(WidthExpr, EndLoc))
5467     return Error(E, "malformed immediate expression");
5468   CE = dyn_cast<MCConstantExpr>(WidthExpr);
5469   if (!CE)
5470     return Error(E, "'width' operand must be an immediate");
5471 
5472   int64_t Width = CE->getValue();
5473   // The width must be in the range [1,32-lsb]
5474   if (Width < 1 || Width > 32 - LSB)
5475     return Error(E, "'width' operand must be in the range [1,32-lsb]");
5476 
5477   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
5478 
5479   return ParseStatus::Success;
5480 }
5481 
5482 ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5483   // Check for a post-index addressing register operand. Specifically:
5484   // postidx_reg := '+' register {, shift}
5485   //              | '-' register {, shift}
5486   //              | register {, shift}
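  //
  // For example, the "r2" in "ldr r0, [r1], r2" or the "-r2, lsl #2" in
  // "str r0, [r1], -r2, lsl #2".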
5487 
5488   // This method must return ParseStatus::NoMatch without consuming any tokens
5489   // in the case where there is no match, as other alternatives take other
5490   // parse methods.
5491   MCAsmParser &Parser = getParser();
5492   AsmToken Tok = Parser.getTok();
5493   SMLoc S = Tok.getLoc();
5494   bool haveEaten = false;
5495   bool isAdd = true;
5496   if (Tok.is(AsmToken::Plus)) {
5497     Parser.Lex(); // Eat the '+' token.
5498     haveEaten = true;
5499   } else if (Tok.is(AsmToken::Minus)) {
5500     Parser.Lex(); // Eat the '-' token.
5501     isAdd = false;
5502     haveEaten = true;
5503   }
5504 
5505   SMLoc E = Parser.getTok().getEndLoc();
5506   int Reg = tryParseRegister();
5507   if (Reg == -1) {
5508     if (!haveEaten)
5509       return ParseStatus::NoMatch;
5510     return Error(Parser.getTok().getLoc(), "register expected");
5511   }
5512 
5513   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5514   unsigned ShiftImm = 0;
5515   if (Parser.getTok().is(AsmToken::Comma)) {
5516     Parser.Lex(); // Eat the ','.
5517     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5518       return ParseStatus::Failure;
5519 
5520     // FIXME: Only approximates end...may include intervening whitespace.
5521     E = Parser.getTok().getLoc();
5522   }
5523 
5524   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5525                                                   ShiftImm, S, E));
5526 
5527   return ParseStatus::Success;
5528 }
5529 
5530 ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5531   // Check for a post-index addressing register operand. Specifically:
5532   // am3offset := '+' register
5533   //              | '-' register
5534   //              | register
5535   //              | # imm
5536   //              | # + imm
5537   //              | # - imm
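  //
  // For example, the "#4" in "ldrd r0, r1, [r2], #4" or the "-r3" in
  // "strh r0, [r2], -r3".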
5538 
5539   // This method must return ParseStatus::NoMatch without consuming any tokens
5540   // in the case where there is no match, as other alternatives take other
5541   // parse methods.
5542   MCAsmParser &Parser = getParser();
5543   AsmToken Tok = Parser.getTok();
5544   SMLoc S = Tok.getLoc();
5545 
5546   // Do immediates first, as we always parse those if we have a '#'.
5547   if (Parser.getTok().is(AsmToken::Hash) ||
5548       Parser.getTok().is(AsmToken::Dollar)) {
5549     Parser.Lex(); // Eat '#' or '$'.
5550     // Explicitly look for a '-', as we need to encode negative zero
5551     // differently.
5552     bool isNegative = Parser.getTok().is(AsmToken::Minus);
5553     const MCExpr *Offset;
5554     SMLoc E;
5555     if (getParser().parseExpression(Offset, E))
5556       return ParseStatus::Failure;
5557     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5558     if (!CE)
5559       return Error(S, "constant expression expected");
5560     // Negative zero is encoded as the flag value
5561     // std::numeric_limits<int32_t>::min().
5562     int32_t Val = CE->getValue();
5563     if (isNegative && Val == 0)
5564       Val = std::numeric_limits<int32_t>::min();
5565 
5566     Operands.push_back(
5567       ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
5568 
5569     return ParseStatus::Success;
5570   }
5571 
5572   bool haveEaten = false;
5573   bool isAdd = true;
5574   if (Tok.is(AsmToken::Plus)) {
5575     Parser.Lex(); // Eat the '+' token.
5576     haveEaten = true;
5577   } else if (Tok.is(AsmToken::Minus)) {
5578     Parser.Lex(); // Eat the '-' token.
5579     isAdd = false;
5580     haveEaten = true;
5581   }
5582 
5583   Tok = Parser.getTok();
5584   int Reg = tryParseRegister();
5585   if (Reg == -1) {
5586     if (!haveEaten)
5587       return ParseStatus::NoMatch;
5588     return Error(Tok.getLoc(), "register expected");
5589   }
5590 
5591   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
5592                                                   0, S, Tok.getEndLoc()));
5593 
5594   return ParseStatus::Success;
5595 }
5596 
5597 /// Convert parsed operands to MCInst.  Needed here because this instruction
5598 /// only has two register operands, but multiplication is commutative so
5599 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
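/// For example, "muls r0, r1, r0" and "muls r0, r0, r1" are both accepted and
/// produce the same encoding.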
5600 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5601                                     const OperandVector &Operands) {
5602   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
5603   ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
5604   // If we have a three-operand form, make sure to set Rn to be the operand
5605   // that isn't the same as Rd.
5606   unsigned RegOp = 4;
5607   if (Operands.size() == 6 &&
5608       ((ARMOperand &)*Operands[4]).getReg() ==
5609           ((ARMOperand &)*Operands[3]).getReg())
5610     RegOp = 5;
5611   ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
5612   Inst.addOperand(Inst.getOperand(0));
5613   ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
5614 }
5615 
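/// Convert parsed Thumb branch operands to an MCInst, selecting between the
/// conditional and unconditional forms based on the surrounding IT block, and
/// between the 16-bit and 32-bit encodings based on the target offset range.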
5616 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5617                                     const OperandVector &Operands) {
5618   int CondOp = -1, ImmOp = -1;
5619   switch(Inst.getOpcode()) {
5620     case ARM::tB:
5621     case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
5622 
5623     case ARM::t2B:
5624     case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
5625 
5626     default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
5627   }
5628   // first decide whether or not the branch should be conditional
5629   // by looking at its location relative to an IT block
5630   if(inITBlock()) {
5631     // inside an IT block we cannot have any conditional branches. any
5632     // such instruction needs to be converted to unconditional form
5633     switch(Inst.getOpcode()) {
5634       case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
5635       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
5636     }
5637   } else {
5638     // outside IT blocks we can only have unconditional branches with AL
5639     // condition code or conditional branches with non-AL condition code
5640     unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
5641     switch(Inst.getOpcode()) {
5642       case ARM::tB:
5643       case ARM::tBcc:
5644         Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
5645         break;
5646       case ARM::t2B:
5647       case ARM::t2Bcc:
5648         Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
5649         break;
5650     }
5651   }
5652 
5653   // now decide on encoding size based on branch target range
5654   switch(Inst.getOpcode()) {
5655     // classify tB as either t2B or t1B based on range of immediate operand
5656     case ARM::tB: {
5657       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5658       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
5659         Inst.setOpcode(ARM::t2B);
5660       break;
5661     }
5662     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
5663     case ARM::tBcc: {
5664       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5665       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
5666         Inst.setOpcode(ARM::t2Bcc);
5667       break;
5668     }
5669   }
5670   ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
5671   ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
5672 }
5673 
5674 void ARMAsmParser::cvtMVEVMOVQtoDReg(
5675   MCInst &Inst, const OperandVector &Operands) {
5676 
5677   // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
5678   assert(Operands.size() == 8);
5679 
5680   ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
5681   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
5682   ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
5683   ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
5684   // skip second copy of Qd in Operands[6]
5685   ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
5686   ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
5687 }
5688 
5689 /// Parse an ARM memory expression. Returns false on success; on failure it
5690 /// emits a diagnostic and returns true. The first token must be a '[' when called.
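/// Handles the "[Rn]", "[Rn, #imm]", "[Rn, +/-Rm{, shift}]" and "[Rn:align]"
/// forms, e.g. "[r0, #-4]", "[r1, r2, lsl #2]" or "[r3:64]", plus an optional
/// trailing '!' pre-indexed writeback marker.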
5691 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5692   MCAsmParser &Parser = getParser();
5693   SMLoc S, E;
5694   if (Parser.getTok().isNot(AsmToken::LBrac))
5695     return TokError("Token is not a Left Bracket");
5696   S = Parser.getTok().getLoc();
5697   Parser.Lex(); // Eat left bracket token.
5698 
5699   const AsmToken &BaseRegTok = Parser.getTok();
5700   int BaseRegNum = tryParseRegister();
5701   if (BaseRegNum == -1)
5702     return Error(BaseRegTok.getLoc(), "register expected");
5703 
5704   // The next token must either be a comma, a colon or a closing bracket.
5705   const AsmToken &Tok = Parser.getTok();
5706   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5707       !Tok.is(AsmToken::RBrac))
5708     return Error(Tok.getLoc(), "malformed memory operand");
5709 
5710   if (Tok.is(AsmToken::RBrac)) {
5711     E = Tok.getEndLoc();
5712     Parser.Lex(); // Eat right bracket token.
5713 
5714     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5715                                              ARM_AM::no_shift, 0, 0, false,
5716                                              S, E));
5717 
5718     // If there's a pre-indexing writeback marker, '!', just add it as a token
5719     // operand. It's rather odd, but syntactically valid.
5720     if (Parser.getTok().is(AsmToken::Exclaim)) {
5721       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5722       Parser.Lex(); // Eat the '!'.
5723     }
5724 
5725     return false;
5726   }
5727 
5728   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5729          "Lost colon or comma in memory operand?!");
5730   if (Tok.is(AsmToken::Comma)) {
5731     Parser.Lex(); // Eat the comma.
5732   }
5733 
5734   // If we have a ':', it's an alignment specifier.
5735   if (Parser.getTok().is(AsmToken::Colon)) {
5736     Parser.Lex(); // Eat the ':'.
5737     E = Parser.getTok().getLoc();
5738     SMLoc AlignmentLoc = Tok.getLoc();
5739 
5740     const MCExpr *Expr;
5741     if (getParser().parseExpression(Expr))
5742      return true;
5743 
5744     // The expression has to be a constant. Memory references with relocations
5745     // don't come through here, as they use the <label> forms of the relevant
5746     // instructions.
5747     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5748     if (!CE)
5749       return Error (E, "constant expression expected");
5750 
5751     unsigned Align = 0;
5752     switch (CE->getValue()) {
5753     default:
5754       return Error(E,
5755                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5756     case 16:  Align = 2; break;
5757     case 32:  Align = 4; break;
5758     case 64:  Align = 8; break;
5759     case 128: Align = 16; break;
5760     case 256: Align = 32; break;
5761     }
5762 
5763     // Now we should have the closing ']'
5764     if (Parser.getTok().isNot(AsmToken::RBrac))
5765       return Error(Parser.getTok().getLoc(), "']' expected");
5766     E = Parser.getTok().getEndLoc();
5767     Parser.Lex(); // Eat right bracket token.
5768 
5769     // Don't worry about range checking the value here. That's handled by
5770     // the is*() predicates.
5771     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5772                                              ARM_AM::no_shift, 0, Align,
5773                                              false, S, E, AlignmentLoc));
5774 
5775     // If there's a pre-indexing writeback marker, '!', just add it as a token
5776     // operand.
5777     if (Parser.getTok().is(AsmToken::Exclaim)) {
5778       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5779       Parser.Lex(); // Eat the '!'.
5780     }
5781 
5782     return false;
5783   }
5784 
5785   // If we have a '#' or '$', it's an immediate offset, else assume it's a
5786   // register offset. Be friendly and also accept a plain integer or expression
5787   // (without a leading hash) for gas compatibility.
5788   if (Parser.getTok().is(AsmToken::Hash) ||
5789       Parser.getTok().is(AsmToken::Dollar) ||
5790       Parser.getTok().is(AsmToken::LParen) ||
5791       Parser.getTok().is(AsmToken::Integer)) {
5792     if (Parser.getTok().is(AsmToken::Hash) ||
5793         Parser.getTok().is(AsmToken::Dollar))
5794       Parser.Lex(); // Eat '#' or '$'
5795     E = Parser.getTok().getLoc();
5796 
5797     bool isNegative = getParser().getTok().is(AsmToken::Minus);
5798     const MCExpr *Offset, *AdjustedOffset;
5799     if (getParser().parseExpression(Offset))
5800      return true;
5801 
5802     if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
5803       // If the constant was #-0, represent it as
5804       // std::numeric_limits<int32_t>::min().
5805       int32_t Val = CE->getValue();
5806       if (isNegative && Val == 0)
5807         CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5808                                     getContext());
5809       // Don't worry about range checking the value here. That's handled by
5810       // the is*() predicates.
5811       AdjustedOffset = CE;
5812     } else
5813       AdjustedOffset = Offset;
5814     Operands.push_back(ARMOperand::CreateMem(
5815         BaseRegNum, AdjustedOffset, 0, ARM_AM::no_shift, 0, 0, false, S, E));
5816 
5817     // Now we should have the closing ']'
5818     if (Parser.getTok().isNot(AsmToken::RBrac))
5819       return Error(Parser.getTok().getLoc(), "']' expected");
5820     E = Parser.getTok().getEndLoc();
5821     Parser.Lex(); // Eat right bracket token.
5822 
5823     // If there's a pre-indexing writeback marker, '!', just add it as a token
5824     // operand.
5825     if (Parser.getTok().is(AsmToken::Exclaim)) {
5826       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5827       Parser.Lex(); // Eat the '!'.
5828     }
5829 
5830     return false;
5831   }
5832 
5833   // The register offset is optionally preceded by a '+' or '-'
5834   bool isNegative = false;
5835   if (Parser.getTok().is(AsmToken::Minus)) {
5836     isNegative = true;
5837     Parser.Lex(); // Eat the '-'.
5838   } else if (Parser.getTok().is(AsmToken::Plus)) {
5839     // Nothing to do.
5840     Parser.Lex(); // Eat the '+'.
5841   }
5842 
5843   E = Parser.getTok().getLoc();
5844   int OffsetRegNum = tryParseRegister();
5845   if (OffsetRegNum == -1)
5846     return Error(E, "register expected");
5847 
5848   // If there's a shift operator, handle it.
5849   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5850   unsigned ShiftImm = 0;
5851   if (Parser.getTok().is(AsmToken::Comma)) {
5852     Parser.Lex(); // Eat the ','.
5853     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5854       return true;
5855   }
5856 
5857   // Now we should have the closing ']'
5858   if (Parser.getTok().isNot(AsmToken::RBrac))
5859     return Error(Parser.getTok().getLoc(), "']' expected");
5860   E = Parser.getTok().getEndLoc();
5861   Parser.Lex(); // Eat right bracket token.
5862 
5863   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5864                                            ShiftType, ShiftImm, 0, isNegative,
5865                                            S, E));
5866 
5867   // If there's a pre-indexing writeback marker, '!', just add it as a token
5868   // operand.
5869   if (Parser.getTok().is(AsmToken::Exclaim)) {
5870     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5871     Parser.Lex(); // Eat the '!'.
5872   }
5873 
5874   return false;
5875 }
5876 
5877 /// parseMemRegOffsetShift - one of these two:
5878 ///   ( lsl | lsr | asr | ror ) , # shift_amount
5879 ///   rrx
5880 /// Returns true on error (after emitting a diagnostic), false otherwise.
5881 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5882                                           unsigned &Amount) {
5883   MCAsmParser &Parser = getParser();
5884   SMLoc Loc = Parser.getTok().getLoc();
5885   const AsmToken &Tok = Parser.getTok();
5886   if (Tok.isNot(AsmToken::Identifier))
5887     return Error(Loc, "illegal shift operator");
5888   StringRef ShiftName = Tok.getString();
5889   if (ShiftName == "lsl" || ShiftName == "LSL" ||
5890       ShiftName == "asl" || ShiftName == "ASL")
5891     St = ARM_AM::lsl;
5892   else if (ShiftName == "lsr" || ShiftName == "LSR")
5893     St = ARM_AM::lsr;
5894   else if (ShiftName == "asr" || ShiftName == "ASR")
5895     St = ARM_AM::asr;
5896   else if (ShiftName == "ror" || ShiftName == "ROR")
5897     St = ARM_AM::ror;
5898   else if (ShiftName == "rrx" || ShiftName == "RRX")
5899     St = ARM_AM::rrx;
5900   else if (ShiftName == "uxtw" || ShiftName == "UXTW")
5901     St = ARM_AM::uxtw;
5902   else
5903     return Error(Loc, "illegal shift operator");
5904   Parser.Lex(); // Eat shift type token.
5905 
5906   // rrx stands alone.
5907   Amount = 0;
5908   if (St != ARM_AM::rrx) {
5909     Loc = Parser.getTok().getLoc();
5910     // A '#' and a shift amount.
5911     const AsmToken &HashTok = Parser.getTok();
5912     if (HashTok.isNot(AsmToken::Hash) &&
5913         HashTok.isNot(AsmToken::Dollar))
5914       return Error(HashTok.getLoc(), "'#' expected");
5915     Parser.Lex(); // Eat hash token.
5916 
5917     const MCExpr *Expr;
5918     if (getParser().parseExpression(Expr))
5919       return true;
5920     // Range check the immediate.
5921     // lsl, ror: 0 <= imm <= 31
5922     // lsr, asr: 0 <= imm <= 32
5923     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5924     if (!CE)
5925       return Error(Loc, "shift amount must be an immediate");
5926     int64_t Imm = CE->getValue();
5927     if (Imm < 0 ||
5928         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5929         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5930       return Error(Loc, "immediate shift value out of range");
5931     // <ShiftTy> #0 is equivalent to no shift; canonicalize it to lsl #0.
5932     if (Imm == 0)
5933       St = ARM_AM::lsl;
5934     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5935     if (Imm == 32)
5936       Imm = 0;
5937     Amount = Imm;
5938   }
5939 
5940   return false;
5941 }
5942 
5943 /// parseFPImm - A floating point immediate expression operand.
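/// Accepts either a literal FP value for the vmov.f16/.f32/.f64 forms (e.g.
/// "vmov.f32 s0, #0.5") or, for the pre-UAL "fconsts"/"fconstd" mnemonics, a
/// raw encoded 8-bit immediate in the range [0, 255].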
5944 ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
5945   MCAsmParser &Parser = getParser();
5946   // Anything that can accept a floating point constant as an operand
5947   // needs to go through here, as the regular parseExpression is
5948   // integer only.
5949   //
5950   // This routine still creates a generic Immediate operand, containing
5951   // a bitcast of the 64-bit floating point value. The various operands
5952   // that accept floats can check whether the value is valid for them
5953   // via the standard is*() predicates.
5954 
5955   SMLoc S = Parser.getTok().getLoc();
5956 
5957   if (Parser.getTok().isNot(AsmToken::Hash) &&
5958       Parser.getTok().isNot(AsmToken::Dollar))
5959     return ParseStatus::NoMatch;
5960 
5961   // Disambiguate the VMOV forms that can accept an FP immediate.
5962   // vmov.f32 <sreg>, #imm
5963   // vmov.f64 <dreg>, #imm
5964   // vmov.f32 <dreg>, #imm  @ vector f32x2
5965   // vmov.f32 <qreg>, #imm  @ vector f32x4
5966   //
5967   // There are also the NEON VMOV instructions which expect an
5968   // integer constant. Make sure we don't try to parse an FPImm
5969   // for these:
5970   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5971   ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5972   bool isVmovf = TyOp.isToken() &&
5973                  (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5974                   TyOp.getToken() == ".f16");
5975   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5976   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5977                                          Mnemonic.getToken() == "fconsts");
5978   if (!(isVmovf || isFconst))
5979     return ParseStatus::NoMatch;
5980 
5981   Parser.Lex(); // Eat '#' or '$'.
5982 
5983   // Handle negation, as that still comes through as a separate token.
5984   bool isNegative = false;
5985   if (Parser.getTok().is(AsmToken::Minus)) {
5986     isNegative = true;
5987     Parser.Lex();
5988   }
5989   const AsmToken &Tok = Parser.getTok();
5990   SMLoc Loc = Tok.getLoc();
5991   if (Tok.is(AsmToken::Real) && isVmovf) {
5992     APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5993     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5994     // If we had a '-' in front, toggle the sign bit.
5995     IntVal ^= (uint64_t)isNegative << 31;
5996     Parser.Lex(); // Eat the token.
5997     Operands.push_back(ARMOperand::CreateImm(
5998           MCConstantExpr::create(IntVal, getContext()),
5999           S, Parser.getTok().getLoc()));
6000     return ParseStatus::Success;
6001   }
6002   // Also handle plain integers. Instructions which allow floating point
6003   // immediates also allow a raw encoded 8-bit value.
6004   if (Tok.is(AsmToken::Integer) && isFconst) {
6005     int64_t Val = Tok.getIntVal();
6006     Parser.Lex(); // Eat the token.
6007     if (Val > 255 || Val < 0)
6008       return Error(Loc, "encoded floating point value out of range");
6009     float RealVal = ARM_AM::getFPImmFloat(Val);
6010     Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
6011 
6012     Operands.push_back(ARMOperand::CreateImm(
6013         MCConstantExpr::create(Val, getContext()), S,
6014         Parser.getTok().getLoc()));
6015     return ParseStatus::Success;
6016   }
6017 
6018   return Error(Loc, "invalid floating point immediate");
6019 }
6020 
6021 /// Parse an ARM instruction operand.  For now this parses the operand regardless
6022 /// of the mnemonic.
6023 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
6024   MCAsmParser &Parser = getParser();
6025   SMLoc S, E;
6026 
6027   // Check if the current operand has a custom associated parser, if so, try to
6028   // custom parse the operand, or fallback to the general approach.
6029   ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
6030   if (ResTy.isSuccess())
6031     return false;
6032   // If there wasn't a custom match, try the generic matcher below. Otherwise,
6033   // there was a match, but an error occurred, in which case, just return that
6034   // the operand parsing failed.
6035   if (ResTy.isFailure())
6036     return true;
6037 
6038   switch (getLexer().getKind()) {
6039   default:
6040     Error(Parser.getTok().getLoc(), "unexpected token in operand");
6041     return true;
6042   case AsmToken::Identifier: {
6043     // If we've seen a branch mnemonic, the next operand must be a label.  This
6044     // is true even if the label is a register name.  So "b r1" means branch to
6045     // label "r1".
6046     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
6047     if (!ExpectLabel) {
6048       if (!tryParseRegisterWithWriteBack(Operands))
6049         return false;
6050       int Res = tryParseShiftRegister(Operands);
6051       if (Res == 0) // success
6052         return false;
6053       else if (Res == -1) // irrecoverable error
6054         return true;
6055       // If this is VMRS, check for the apsr_nzcv operand.
6056       if (Mnemonic == "vmrs" &&
6057           Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
6058         S = Parser.getTok().getLoc();
6059         Parser.Lex();
6060         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
6061         return false;
6062       }
6063     }
6064 
6065     // Fall through for the Identifier case that is not a register or a
6066     // special name.
6067     [[fallthrough]];
6068   }
6069   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
6070   case AsmToken::Integer: // things like 1f and 2b as branch targets
6071   case AsmToken::String:  // quoted label names.
6072   case AsmToken::Dot: {   // . as a branch target
6073     // This was not a register so parse other operands that start with an
6074     // identifier (like labels) as expressions and create them as immediates.
6075     const MCExpr *IdVal;
6076     S = Parser.getTok().getLoc();
6077     if (getParser().parseExpression(IdVal))
6078       return true;
6079     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6080     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
6081     return false;
6082   }
6083   case AsmToken::LBrac:
6084     return parseMemory(Operands);
6085   case AsmToken::LCurly:
6086     return parseRegisterList(Operands, !Mnemonic.starts_with("clr"));
6087   case AsmToken::Dollar:
6088   case AsmToken::Hash: {
6089     // #42 -> immediate
6090     // $ 42 -> immediate
6091     // $foo -> symbol name
6092     // $42 -> symbol name
6093     S = Parser.getTok().getLoc();
6094 
6095     // Favor the interpretation of $-prefixed operands as symbol names.
6096     // Cases where immediates are explicitly expected are handled by their
6097     // specific ParseMethod implementations.
6098     auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
6099     bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
6100                             (AdjacentToken.is(AsmToken::Identifier) ||
6101                              AdjacentToken.is(AsmToken::Integer));
6102     if (!ExpectIdentifier) {
6103       // Token is not part of identifier. Drop leading $ or # before parsing
6104       // expression.
6105       Parser.Lex();
6106     }
6107 
6108     if (Parser.getTok().isNot(AsmToken::Colon)) {
6109       bool IsNegative = Parser.getTok().is(AsmToken::Minus);
6110       const MCExpr *ImmVal;
6111       if (getParser().parseExpression(ImmVal))
6112         return true;
6113       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
6114       if (CE) {
6115         int32_t Val = CE->getValue();
6116         if (IsNegative && Val == 0)
6117           ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
6118                                           getContext());
6119       }
6120       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6121       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
6122 
6123       // There can be a trailing '!' on operands that we want as a separate
6124       // '!' Token operand. Handle that here. For example, the compatibility
6125       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
6126       if (Parser.getTok().is(AsmToken::Exclaim)) {
6127         Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
6128                                                    Parser.getTok().getLoc()));
6129         Parser.Lex(); // Eat exclaim token
6130       }
6131       return false;
6132     }
6133     // w/ a ':' after the '#', it's just like a plain ':'.
6134     [[fallthrough]];
6135   }
6136   case AsmToken::Colon: {
6137     S = Parser.getTok().getLoc();
6138     // ":lower16:", ":upper16:", ":lower0_7:", ":lower8_15:", ":upper0_7:" and
6139     // ":upper8_15:", expression prefixes
6140     // FIXME: Check it's an expression prefix,
6141     // e.g. (FOO - :lower16:BAR) isn't legal.
6142     ARMMCExpr::VariantKind RefKind;
6143     if (parsePrefix(RefKind))
6144       return true;
6145 
6146     const MCExpr *SubExprVal;
6147     if (getParser().parseExpression(SubExprVal))
6148       return true;
6149 
6150     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
6151                                               getContext());
6152     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6153     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
6154     return false;
6155   }
6156   case AsmToken::Equal: {
6157     S = Parser.getTok().getLoc();
6158     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
6159       return Error(S, "unexpected token in operand");
6160     Parser.Lex(); // Eat '='
6161     const MCExpr *SubExprVal;
6162     if (getParser().parseExpression(SubExprVal))
6163       return true;
6164     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6165 
6166     // execute-only: we assume that assembly programmers know what they are
6167     // doing and allow literal pool creation here
6168     Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
6169     return false;
6170   }
6171   }
6172 }
6173 
6174 bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6175   const MCExpr *Expr = nullptr;
6176   SMLoc L = getParser().getTok().getLoc();
6177   if (check(getParser().parseExpression(Expr), L, "expected expression"))
6178     return true;
6179   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6180   if (check(!Value, L, "expected constant expression"))
6181     return true;
6182   Out = Value->getValue();
6183   return false;
6184 }
6185 
6186 // parsePrefix - Parse ARM 16-bit relocations expression prefixes, i.e.
6187 // :lower16: and :upper16: and Thumb 8-bit relocation expression prefixes, i.e.
6188 // :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7:
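// For example, "movw r0, #:lower16:sym" and "movt r0, #:upper16:sym" load the
// lower and upper halves of the 32-bit value of "sym".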
6189 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
6190   MCAsmParser &Parser = getParser();
6191   RefKind = ARMMCExpr::VK_ARM_None;
6192 
6193   // consume an optional '#' (GNU compatibility)
6194   if (getLexer().is(AsmToken::Hash))
6195     Parser.Lex();
6196 
6197   assert(getLexer().is(AsmToken::Colon) && "expected a :");
6198   Parser.Lex(); // Eat ':'
6199 
6200   if (getLexer().isNot(AsmToken::Identifier)) {
6201     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
6202     return true;
6203   }
6204 
6205   enum {
6206     COFF = (1 << MCContext::IsCOFF),
6207     ELF = (1 << MCContext::IsELF),
6208     MACHO = (1 << MCContext::IsMachO),
6209     WASM = (1 << MCContext::IsWasm),
6210   };
6211   static const struct PrefixEntry {
6212     const char *Spelling;
6213     ARMMCExpr::VariantKind VariantKind;
6214     uint8_t SupportedFormats;
6215   } PrefixEntries[] = {
6216       {"upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO},
6217       {"lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO},
6218       {"upper8_15", ARMMCExpr::VK_ARM_HI_8_15, ELF},
6219       {"upper0_7", ARMMCExpr::VK_ARM_HI_0_7, ELF},
6220       {"lower8_15", ARMMCExpr::VK_ARM_LO_8_15, ELF},
6221       {"lower0_7", ARMMCExpr::VK_ARM_LO_0_7, ELF},
6222   };
6223 
6224   StringRef IDVal = Parser.getTok().getIdentifier();
6225 
6226   const auto &Prefix =
6227       llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
6228         return PE.Spelling == IDVal;
6229       });
6230   if (Prefix == std::end(PrefixEntries)) {
6231     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
6232     return true;
6233   }
6234 
6235   uint8_t CurrentFormat;
6236   switch (getContext().getObjectFileType()) {
6237   case MCContext::IsMachO:
6238     CurrentFormat = MACHO;
6239     break;
6240   case MCContext::IsELF:
6241     CurrentFormat = ELF;
6242     break;
6243   case MCContext::IsCOFF:
6244     CurrentFormat = COFF;
6245     break;
6246   case MCContext::IsWasm:
6247     CurrentFormat = WASM;
6248     break;
6249   case MCContext::IsGOFF:
6250   case MCContext::IsSPIRV:
6251   case MCContext::IsXCOFF:
6252   case MCContext::IsDXContainer:
6253     llvm_unreachable("unexpected object format");
6254     break;
6255   }
6256 
6257   if (~Prefix->SupportedFormats & CurrentFormat) {
6258     Error(Parser.getTok().getLoc(),
6259           "cannot represent relocation in the current file format");
6260     return true;
6261   }
6262 
6263   RefKind = Prefix->VariantKind;
6264   Parser.Lex();
6265 
6266   if (getLexer().isNot(AsmToken::Colon)) {
6267     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
6268     return true;
6269   }
6270   Parser.Lex(); // Eat the last ':'
6271 
6272   // consume an optional trailing '#' (GNU compatibility)
6273   parseOptionalToken(AsmToken::Hash);
6274 
6275   return false;
6276 }
6277 
6278 /// Given a mnemonic, split out possible predication code and carry
6279 /// setting letters to form a canonical mnemonic and flags.
6280 //
6281 // FIXME: Would be nice to autogen this.
6282 // FIXME: This is a bit of a maze of special cases.
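// For example, "addseq" splits into the mnemonic "add" with CarrySetting set
// and PredicationCode ARMCC::EQ, and "cpsie" splits into "cps" with
// ProcessorIMod set to ARM_PROC::IE.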
6283 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
6284                                       StringRef ExtraToken,
6285                                       unsigned &PredicationCode,
6286                                       unsigned &VPTPredicationCode,
6287                                       bool &CarrySetting,
6288                                       unsigned &ProcessorIMod,
6289                                       StringRef &ITMask) {
6290   PredicationCode = ARMCC::AL;
6291   VPTPredicationCode = ARMVCC::None;
6292   CarrySetting = false;
6293   ProcessorIMod = 0;
6294 
6295   // Ignore some mnemonics we know aren't predicated forms.
6296   //
6297   // FIXME: Would be nice to autogen this.
6298   if ((Mnemonic == "movs" && isThumb()) || Mnemonic == "teq" ||
6299       Mnemonic == "vceq" || Mnemonic == "svc" || Mnemonic == "mls" ||
6300       Mnemonic == "smmls" || Mnemonic == "vcls" || Mnemonic == "vmls" ||
6301       Mnemonic == "vnmls" || Mnemonic == "vacge" || Mnemonic == "vcge" ||
6302       Mnemonic == "vclt" || Mnemonic == "vacgt" || Mnemonic == "vaclt" ||
6303       Mnemonic == "vacle" || Mnemonic == "hlt" || Mnemonic == "vcgt" ||
6304       Mnemonic == "vcle" || Mnemonic == "smlal" || Mnemonic == "umaal" ||
6305       Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" ||
6306       Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || Mnemonic == "fmuls" ||
6307       Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
6308       Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
6309       Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
6310       Mnemonic == "vrintm" || Mnemonic == "hvc" ||
6311       Mnemonic.starts_with("vsel") || Mnemonic == "vins" ||
6312       Mnemonic == "vmovx" || Mnemonic == "bxns" || Mnemonic == "blxns" ||
6313       Mnemonic == "vdot" || Mnemonic == "vmmla" || Mnemonic == "vudot" ||
6314       Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6315       Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "wls" ||
6316       Mnemonic == "le" || Mnemonic == "dls" || Mnemonic == "csel" ||
6317       Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
6318       Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
6319       Mnemonic == "cset" || Mnemonic == "csetm" || Mnemonic == "aut" ||
6320       Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "bti")
6321     return Mnemonic;
6322 
6323   // First, split out any predication code. Ignore mnemonics we know aren't
6324   // predicated but do have a carry-set and so weren't caught above.
6325   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
6326       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
6327       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
6328       Mnemonic != "sbcs" && Mnemonic != "rscs" &&
6329       !(hasMVE() &&
6330         (Mnemonic == "vmine" || Mnemonic == "vshle" || Mnemonic == "vshlt" ||
6331          Mnemonic == "vshllt" || Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
6332          Mnemonic == "vmvne" || Mnemonic == "vorne" || Mnemonic == "vnege" ||
6333          Mnemonic == "vnegt" || Mnemonic == "vmule" || Mnemonic == "vmult" ||
6334          Mnemonic == "vrintne" || Mnemonic == "vcmult" ||
6335          Mnemonic == "vcmule" || Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
6336          Mnemonic.starts_with("vq")))) {
6337     unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
6338     if (CC != ~0U) {
6339       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
6340       PredicationCode = CC;
6341     }
6342   }
6343 
6344   // Next, determine if we have a carry setting bit. We explicitly ignore all
6345   // the instructions we know end in 's'.
6346   if (Mnemonic.ends_with("s") &&
6347       !(Mnemonic == "cps" || Mnemonic == "mls" || Mnemonic == "mrs" ||
6348         Mnemonic == "smmls" || Mnemonic == "vabs" || Mnemonic == "vcls" ||
6349         Mnemonic == "vmls" || Mnemonic == "vmrs" || Mnemonic == "vnmls" ||
6350         Mnemonic == "vqabs" || Mnemonic == "vrecps" || Mnemonic == "vrsqrts" ||
6351         Mnemonic == "srs" || Mnemonic == "flds" || Mnemonic == "fmrs" ||
6352         Mnemonic == "fsqrts" || Mnemonic == "fsubs" || Mnemonic == "fsts" ||
6353         Mnemonic == "fcpys" || Mnemonic == "fdivs" || Mnemonic == "fmuls" ||
6354         Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || Mnemonic == "vfms" ||
6355         Mnemonic == "vfnms" || Mnemonic == "fconsts" || Mnemonic == "bxns" ||
6356         Mnemonic == "blxns" || Mnemonic == "vfmas" || Mnemonic == "vmlas" ||
6357         (Mnemonic == "movs" && isThumb()))) {
6358     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
6359     CarrySetting = true;
6360   }
6361 
6362   // The "cps" instruction can have an interrupt mode operand which is glued into
6363   // the mnemonic. Check if this is the case, split it out and parse the imod op.
6364   if (Mnemonic.starts_with("cps")) {
6365     // Split out any imod code.
6366     unsigned IMod =
6367       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
6368       .Case("ie", ARM_PROC::IE)
6369       .Case("id", ARM_PROC::ID)
6370       .Default(~0U);
6371     if (IMod != ~0U) {
6372       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
6373       ProcessorIMod = IMod;
6374     }
6375   }
6376 
6377   if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
6378       Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
6379       Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
6380       Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
6381       Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
6382       Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
6383       Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
6384     unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
6385     if (CC != ~0U) {
6386       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
6387       VPTPredicationCode = CC;
6388     }
6389     return Mnemonic;
6390   }
6391 
6392   // The "it" instruction has the condition mask on the end of the mnemonic.
6393   if (Mnemonic.starts_with("it")) {
6394     ITMask = Mnemonic.slice(2, Mnemonic.size());
6395     Mnemonic = Mnemonic.slice(0, 2);
6396   }
6397 
6398   if (Mnemonic.starts_with("vpst")) {
6399     ITMask = Mnemonic.slice(4, Mnemonic.size());
6400     Mnemonic = Mnemonic.slice(0, 4);
6401   } else if (Mnemonic.starts_with("vpt")) {
6402     ITMask = Mnemonic.slice(3, Mnemonic.size());
6403     Mnemonic = Mnemonic.slice(0, 3);
6404   }
6405 
6406   return Mnemonic;
6407 }
6408 
6409 /// Given a canonical mnemonic, determine if the instruction ever allows
6410 /// inclusion of carry set or predication code operands.
6411 //
6412 // FIXME: It would be nice to autogen this.
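// For example, "add" accepts both an optional carry-set suffix and a
// predication code, while "it", "cbz" and "cps" are never predicable.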
6413 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
6414                                          StringRef ExtraToken,
6415                                          StringRef FullInst,
6416                                          bool &CanAcceptCarrySet,
6417                                          bool &CanAcceptPredicationCode,
6418                                          bool &CanAcceptVPTPredicationCode) {
6419   CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6420 
6421   CanAcceptCarrySet =
6422       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6423       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
6424       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
6425       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
6426       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
6427       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
6428       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
6429       (!isThumb() &&
6430        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
6431         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
6432 
6433   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
6434       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
6435       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
6436       Mnemonic.starts_with("crc32") || Mnemonic.starts_with("cps") ||
6437       Mnemonic.starts_with("vsel") || Mnemonic == "vmaxnm" ||
6438       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
6439       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
6440       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
6441       Mnemonic.starts_with("aes") || Mnemonic == "hvc" ||
6442       Mnemonic == "setpan" || Mnemonic.starts_with("sha1") ||
6443       Mnemonic.starts_with("sha256") ||
6444       (FullInst.starts_with("vmull") && FullInst.ends_with(".p64")) ||
6445       Mnemonic == "vmovx" || Mnemonic == "vins" || Mnemonic == "vudot" ||
6446       Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6447       Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "vfmat" ||
6448       Mnemonic == "vfmab" || Mnemonic == "vdot" || Mnemonic == "vmmla" ||
6449       Mnemonic == "sb" || Mnemonic == "ssbb" || Mnemonic == "pssbb" ||
6450       Mnemonic == "vsmmla" || Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
6451       Mnemonic == "vusdot" || Mnemonic == "vsudot" || Mnemonic == "bfcsel" ||
6452       Mnemonic == "wls" || Mnemonic == "dls" || Mnemonic == "le" ||
6453       Mnemonic == "csel" || Mnemonic == "csinc" || Mnemonic == "csinv" ||
6454       Mnemonic == "csneg" || Mnemonic == "cinc" || Mnemonic == "cinv" ||
6455       Mnemonic == "cneg" || Mnemonic == "cset" || Mnemonic == "csetm" ||
6456       (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6457        !MS.isITPredicableCDEInstr(Mnemonic)) ||
6458       Mnemonic.starts_with("vpt") || Mnemonic.starts_with("vpst") ||
6459       Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
6460       Mnemonic == "bti" ||
6461       (hasMVE() &&
6462        (Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vld2") ||
6463         Mnemonic.starts_with("vst4") || Mnemonic.starts_with("vld4") ||
6464         Mnemonic.starts_with("wlstp") || Mnemonic.starts_with("dlstp") ||
6465         Mnemonic.starts_with("letp")))) {
6466     // These mnemonics are never predicable
6467     CanAcceptPredicationCode = false;
6468   } else if (!isThumb()) {
6469     // Some instructions are only predicable in Thumb mode
6470     CanAcceptPredicationCode =
6471         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
6472         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
6473         Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
6474         Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
6475         Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
6476         Mnemonic != "stc2" && Mnemonic != "stc2l" && Mnemonic != "tsb" &&
6477         !Mnemonic.starts_with("rfe") && !Mnemonic.starts_with("srs");
6478   } else if (isThumbOne()) {
6479     if (hasV6MOps())
6480       CanAcceptPredicationCode = Mnemonic != "movs";
6481     else
6482       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
6483   } else
6484     CanAcceptPredicationCode = true;
6485 }
6486 
6487 // Some Thumb instructions have two-operand forms that are not available as
6488 // three-operand forms; convert to the two-operand form if possible.
6489 //
6490 // FIXME: We would really like to be able to tablegen'erate this.
6491 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
6492                                                  bool CarrySetting,
6493                                                  OperandVector &Operands) {
6494   if (Operands.size() != 6)
6495     return;
6496 
6497   const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6498         auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
6499   if (!Op3.isReg() || !Op4.isReg())
6500     return;
6501 
6502   auto Op3Reg = Op3.getReg();
6503   auto Op4Reg = Op4.getReg();
6504 
6505   // For most Thumb2 cases we just generate the 3 operand form and reduce
6506   // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
6507   // won't accept SP or PC so we do the transformation here taking care
6508   // with immediate range in the 'add sp, sp #imm' case.
6509   auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
6510   if (isThumbTwo()) {
6511     if (Mnemonic != "add")
6512       return;
6513     bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6514                         (Op5.isReg() && Op5.getReg() == ARM::PC);
6515     if (!TryTransform) {
6516       TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6517                       (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6518                      !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6519                        Op5.isImm() && !Op5.isImm0_508s4());
6520     }
6521     if (!TryTransform)
6522       return;
6523   } else if (!isThumbOne())
6524     return;
6525 
6526   if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
6527         Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6528         Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
6529         Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
6530     return;
6531 
6532   // If first 2 operands of a 3 operand instruction are the same
6533   // then transform to 2 operand version of the same instruction
6534   // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
6535   bool Transform = Op3Reg == Op4Reg;
6536 
6537   // For commutative operations, we might be able to transform if we swap
6538   // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
6539   // as tADDrsp.
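  // For example, 'ands r0, r1, r0' is treated as 'ands r0, r0, r1' by swapping
  // Op4 and Op5, and is then reduced to the two-operand form 'ands r0, r1'.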
6540   const ARMOperand *LastOp = &Op5;
6541   bool Swap = false;
6542   if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6543       ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
6544        Mnemonic == "and" || Mnemonic == "eor" ||
6545        Mnemonic == "adc" || Mnemonic == "orr")) {
6546     Swap = true;
6547     LastOp = &Op4;
6548     Transform = true;
6549   }
6550 
6551   // If both registers are the same then remove one of them from
6552   // the operand list, with certain exceptions.
6553   if (Transform) {
6554     // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
6555     // 2 operand forms don't exist.
6556     if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
6557         LastOp->isReg())
6558       Transform = false;
6559 
6560     // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
6561     // 3-bits because the ARMARM says not to.
6562     if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
6563       Transform = false;
6564   }
6565 
6566   if (Transform) {
6567     if (Swap)
6568       std::swap(Op4, Op5);
6569     Operands.erase(Operands.begin() + 3);
6570   }
6571 }
6572 
6573 // This function returns true if the operand is one of the following
6574 // relocations: :upper8_15:, :upper0_7:, :lower8_15:, or :lower0_7:.
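// For example, an operand written as ':upper8_15:sym' parses to an ARMMCExpr
// with one of these kinds.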
6575 static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp) {
6576   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
6577   if (!Op.isImm())
6578     return false;
6579   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6580   if (CE)
6581     return false;
6582   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6583   if (!E)
6584     return false;
6585   const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6586   if (ARM16Expr && (ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_8_15 ||
6587                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_0_7 ||
6588                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_8_15 ||
6589                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_0_7))
6590     return true;
6591   return false;
6592 }
6593 
6594 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
6595                                           OperandVector &Operands) {
6596   // FIXME: This is all horribly hacky. We really need a better way to deal
6597   // with optional operands like this in the matcher table.
6598 
6599   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
6600   // another does not. Specifically, the MOVW instruction does not. So we
6601   // special case it here and remove the defaulted (non-setting) cc_out
6602   // operand if that's the instruction we're trying to match.
6603   //
6604   // We do this as post-processing of the explicit operands rather than just
6605   // conditionally adding the cc_out in the first place because we need
6606   // to check the type of the parsed immediate operand.
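  // For example, in ARM mode 'mov r0, #0xabcd' can only match the MOVW
  // encoding (0xabcd is not a valid modified immediate), so the defaulted
  // cc_out operand is removed here.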
6607   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
6608       !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
6609       static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
6610       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6611     return true;
6612 
6613   if (Mnemonic == "movs" && Operands.size() > 3 && isThumb() &&
6614       isThumbI8Relocation(*Operands[3]))
6615     return true;
6616 
6617   // Register-register 'add' for thumb does not have a cc_out operand
6618   // when there are only two register operands.
6619   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
6620       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6621       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6622       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6623     return true;
6624   // Register-register 'add' for thumb does not have a cc_out operand
6625   // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
6626   // have to check the immediate range here since Thumb2 has a variant
6627   // that can handle a different range and has a cc_out operand.
6628   if (((isThumb() && Mnemonic == "add") ||
6629        (isThumbTwo() && Mnemonic == "sub")) &&
6630       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6631       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6632       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
6633       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6634       ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
6635        static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
6636     return true;
6637   // For Thumb2, add/sub immediate does not have a cc_out operand for the
6638   // imm0_4095 variant. That's the least-preferred variant when
6639   // selecting via the generic "add" mnemonic, so to know that we
6640   // should remove the cc_out operand, we have to explicitly check that
6641   // it's not one of the other variants. Ugh.
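  // For example, 'add r0, r1, #4095' can only use encoding T4 (ADDW), which
  // has no cc_out, whereas 'add r0, r1, #4' may still match T1 or T3.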
6642   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6643       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6644       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6645       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6646     // Nest conditions rather than one big 'if' statement for readability.
6647     //
6648     // If both registers are low, we're in an IT block, and the immediate is
6649     // in range, we should use encoding T1 instead, which has a cc_out.
6650     if (inITBlock() &&
6651         isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
6652         isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
6653         static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
6654       return false;
6655     // Check against T3. If the second register is the PC, this is an
6656     // alternate form of ADR, which uses encoding T4, so check for that too.
6657     if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
6658         (static_cast<ARMOperand &>(*Operands[5]).isT2SOImm() ||
6659          static_cast<ARMOperand &>(*Operands[5]).isT2SOImmNeg()))
6660       return false;
6661 
6662     // Otherwise, we use encoding T4, which does not have a cc_out
6663     // operand.
6664     return true;
6665   }
6666 
6667   // The thumb2 multiply instruction doesn't have a CCOut register, so
6668   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
6669   // use the 16-bit encoding or not.
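  // For example, outside of an IT block 'mul r0, r0, r1' must use the 32-bit
  // encoding, so the cc_out operand added for the mnemonic is removed.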
6670   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
6671       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6672       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6673       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6674       static_cast<ARMOperand &>(*Operands[5]).isReg() &&
6675       // If the registers aren't low regs, the destination reg isn't the
6676       // same as one of the source regs, or the cc_out operand is zero
6677       // outside of an IT block, we have to use the 32-bit encoding, so
6678       // remove the cc_out operand.
6679       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6680        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6681        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
6682        !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6683                             static_cast<ARMOperand &>(*Operands[5]).getReg() &&
6684                         static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6685                             static_cast<ARMOperand &>(*Operands[4]).getReg())))
6686     return true;
6687 
6688   // Also check the 'mul' syntax variant that doesn't specify an explicit
6689   // destination register.
6690   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
6691       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6692       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6693       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6694       // If the registers aren't low regs  or the cc_out operand is zero
6695       // outside of an IT block, we have to use the 32-bit encoding, so
6696       // remove the cc_out operand.
6697       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6698        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6699        !inITBlock()))
6700     return true;
6701 
6702   // Register-register 'add/sub' for thumb does not have a cc_out operand
6703   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
6704   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
6705   // right, this will result in better diagnostics (which operand is off)
6706   // anyway.
6707   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
6708       (Operands.size() == 5 || Operands.size() == 6) &&
6709       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6710       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
6711       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6712       (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
6713        (Operands.size() == 6 &&
6714         static_cast<ARMOperand &>(*Operands[5]).isImm()))) {
6715     // Thumb2 (add|sub){s}{p}.w GPRnopc, sp, #{T2SOImm} has cc_out
6716     return (!(isThumbTwo() &&
6717               (static_cast<ARMOperand &>(*Operands[4]).isT2SOImm() ||
6718                static_cast<ARMOperand &>(*Operands[4]).isT2SOImmNeg())));
6719   }
6720   // Fixme: Should join all the thumb+thumb2 (add|sub) in a single if case
6721   // Thumb2 ADD r0, #4095 -> ADDW r0, r0, #4095 (T4)
6722   // Thumb2 SUB r0, #4095 -> SUBW r0, r0, #4095
6723   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6724       (Operands.size() == 5) &&
6725       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6726       static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::SP &&
6727       static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::PC &&
6728       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6729       static_cast<ARMOperand &>(*Operands[4]).isImm()) {
6730     const ARMOperand &IMM = static_cast<ARMOperand &>(*Operands[4]);
6731     if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
6732       return false; // add.w / sub.w
6733     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
6734       const int64_t Value = CE->getValue();
6735       // Thumb1 imm8 sub / add
6736       if ((Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(Value & 3)) &&
6737           isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()))
6738         return false;
6739       return true; // Thumb2 T4 addw / subw
6740     }
6741   }
6742   return false;
6743 }
6744 
6745 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
6746                                               OperandVector &Operands) {
6747   // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
6748   unsigned RegIdx = 3;
6749   if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
6750       Mnemonic == "vrintr") &&
6751       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
6752        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
6753     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6754         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
6755          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
6756       RegIdx = 4;
6757 
6758     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
6759         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6760              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
6761          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6762              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
6763       return true;
6764   }
6765   return false;
6766 }
6767 
6768 bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
6769                                                     OperandVector &Operands) {
6770   if (!hasMVE() || Operands.size() < 3)
6771     return true;
6772 
6773   if (Mnemonic.starts_with("vld2") || Mnemonic.starts_with("vld4") ||
6774       Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vst4"))
6775     return true;
6776 
6777   if (Mnemonic.starts_with("vctp") || Mnemonic.starts_with("vpnot"))
6778     return false;
6779 
6780   if (Mnemonic.starts_with("vmov") &&
6781       !(Mnemonic.starts_with("vmovl") || Mnemonic.starts_with("vmovn") ||
6782         Mnemonic.starts_with("vmovx"))) {
6783     for (auto &Operand : Operands) {
6784       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6785           ((*Operand).isReg() &&
6786            (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6787              (*Operand).getReg()) ||
6788             ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6789               (*Operand).getReg())))) {
6790         return true;
6791       }
6792     }
6793     return false;
6794   } else {
6795     for (auto &Operand : Operands) {
6796       // We check the larger class QPR instead of just the legal class
6797       // MQPR, to more accurately report errors when using Q registers
6798       // outside of the allowed range.
6799       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6800           (Operand->isReg() &&
6801            (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6802              Operand->getReg()))))
6803         return false;
6804     }
6805     return true;
6806   }
6807 }
6808 
6809 static bool isDataTypeToken(StringRef Tok) {
6810   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6811     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6812     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6813     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6814     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6815     Tok == ".f" || Tok == ".d";
6816 }
6817 
6818 // FIXME: This bit should probably be handled via an explicit match class
6819 // in the .td files that matches the suffix instead of having it be
6820 // a literal string token the way it is now.
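// For example, 'vldm.f64 r0, {d0-d3}' is parsed the same as
// 'vldm r0, {d0-d3}'; the datatype suffix is simply dropped.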
6821 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6822   return Mnemonic.starts_with("vldm") || Mnemonic.starts_with("vstm");
6823 }
6824 
6825 static void applyMnemonicAliases(StringRef &Mnemonic,
6826                                  const FeatureBitset &Features,
6827                                  unsigned VariantID);
6828 
6829 // The GNU assembler has aliases of ldrd and strd with the second register
6830 // omitted. We don't have a way to do that in tablegen, so fix it up here.
6831 //
6832 // We have to be careful to not emit an invalid Rt2 here, because the rest of
6833 // the assembly parser could then generate confusing diagnostics referring to
6834 // it. If we do find anything that prevents us from doing the transformation we
6835 // bail out, and let the assembly parser report an error on the instruction as
6836 // it is written.
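// For example, 'ldrd r0, [r4]' is accepted as an alias of 'ldrd r0, r1, [r4]':
// the omitted Rt2 is filled in with the register following Rt.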
6837 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6838                                      OperandVector &Operands) {
6839   if (Mnemonic != "ldrd" && Mnemonic != "strd")
6840     return;
6841   if (Operands.size() < 4)
6842     return;
6843 
6844   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6845   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6846 
6847   if (!Op2.isReg())
6848     return;
6849   if (!Op3.isGPRMem())
6850     return;
6851 
6852   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6853   if (!GPR.contains(Op2.getReg()))
6854     return;
6855 
6856   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6857   if (!isThumb() && (RtEncoding & 1)) {
6858     // In ARM mode, the registers must be from an aligned pair; this
6859     // restriction does not apply in Thumb mode.
6860     return;
6861   }
6862   if (Op2.getReg() == ARM::PC)
6863     return;
6864   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6865   if (!PairedReg || PairedReg == ARM::PC ||
6866       (PairedReg == ARM::SP && !hasV8Ops()))
6867     return;
6868 
6869   Operands.insert(
6870       Operands.begin() + 3,
6871       ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6872 }
6873 
6874 // Dual-register instructions have the following syntax:
6875 // <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
6876 // This function tries to remove <Rdest+1> and replace <Rdest> with a pair
6877 // operand. If the conversion fails an error is diagnosed, and the function
6878 // returns true.
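// For example, in 'cx1d p0, r2, r3, #0' the operands r2, r3 are folded into
// the single register-pair operand r2_r3.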
6879 bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
6880                                             OperandVector &Operands) {
6881   assert(MS.isCDEDualRegInstr(Mnemonic));
6882   bool isPredicable =
6883       Mnemonic == "cx1da" || Mnemonic == "cx2da" || Mnemonic == "cx3da";
6884   size_t NumPredOps = isPredicable ? 1 : 0;
6885 
6886   if (Operands.size() <= 3 + NumPredOps)
6887     return false;
6888 
6889   StringRef Op2Diag(
6890       "operand must be an even-numbered register in the range [r0, r10]");
6891 
6892   const MCParsedAsmOperand &Op2 = *Operands[2 + NumPredOps];
6893   if (!Op2.isReg())
6894     return Error(Op2.getStartLoc(), Op2Diag);
6895 
6896   unsigned RNext;
6897   unsigned RPair;
6898   switch (Op2.getReg()) {
6899   default:
6900     return Error(Op2.getStartLoc(), Op2Diag);
6901   case ARM::R0:
6902     RNext = ARM::R1;
6903     RPair = ARM::R0_R1;
6904     break;
6905   case ARM::R2:
6906     RNext = ARM::R3;
6907     RPair = ARM::R2_R3;
6908     break;
6909   case ARM::R4:
6910     RNext = ARM::R5;
6911     RPair = ARM::R4_R5;
6912     break;
6913   case ARM::R6:
6914     RNext = ARM::R7;
6915     RPair = ARM::R6_R7;
6916     break;
6917   case ARM::R8:
6918     RNext = ARM::R9;
6919     RPair = ARM::R8_R9;
6920     break;
6921   case ARM::R10:
6922     RNext = ARM::R11;
6923     RPair = ARM::R10_R11;
6924     break;
6925   }
6926 
6927   const MCParsedAsmOperand &Op3 = *Operands[3 + NumPredOps];
6928   if (!Op3.isReg() || Op3.getReg() != RNext)
6929     return Error(Op3.getStartLoc(), "operand must be a consecutive register");
6930 
6931   Operands.erase(Operands.begin() + 3 + NumPredOps);
6932   Operands[2 + NumPredOps] =
6933       ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
6934   return false;
6935 }
6936 
6937 /// Parse an arm instruction mnemonic followed by its operands.
6938 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
6939                                     SMLoc NameLoc, OperandVector &Operands) {
6940   MCAsmParser &Parser = getParser();
6941 
6942   // Apply mnemonic aliases before doing anything else, as the destination
6943   // mnemonic may include suffixes, and we want to handle them normally.
6944   // The generic tblgen'erated code does this later, at the start of
6945   // MatchInstructionImpl(), but that's too late for aliases that include
6946   // any sort of suffix.
6947   const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6948   unsigned AssemblerDialect = getParser().getAssemblerDialect();
6949   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
6950 
6951   // First check for the ARM-specific .req directive.
6952   if (Parser.getTok().is(AsmToken::Identifier) &&
6953       Parser.getTok().getIdentifier().lower() == ".req") {
6954     parseDirectiveReq(Name, NameLoc);
6955     // We always return 'error' for this, as we're done with this
6956     // statement and don't need to match the instruction.
6957     return true;
6958   }
6959 
6960   // Create the leading tokens for the mnemonic, split by '.' characters.
6961   size_t Start = 0, Next = Name.find('.');
6962   StringRef Mnemonic = Name.slice(Start, Next);
6963   StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
6964 
6965   // Split out the predication code and carry setting flag from the mnemonic.
6966   unsigned PredicationCode;
6967   unsigned VPTPredicationCode;
6968   unsigned ProcessorIMod;
6969   bool CarrySetting;
6970   StringRef ITMask;
6971   Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
6972                            CarrySetting, ProcessorIMod, ITMask);
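  // For example, 'addseq.w' yields Mnemonic "add" with CarrySetting set,
  // PredicationCode EQ, and the extra token ".w" handled below.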
6973 
6974   // In Thumb1, only the branch (B) instruction can be predicated.
6975   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6976     return Error(NameLoc, "conditional execution not supported in Thumb1");
6977   }
6978 
6979   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6980 
6981   // Handle the mask for IT and VPT instructions. In ARMOperand and
6982   // MCOperand, this is stored in a format independent of the
6983   // condition code: the lowest set bit indicates the end of the
6984   // encoding, and above that, a 1 bit indicates 'else', and a 0
6985   // indicates 'then'. E.g.
6986   //    IT    -> 1000
6987   //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
6988   //    ITxy  -> xy10    (e.g. ITET -> 1010)
6989   //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
6990   // Note: See the ARM::PredBlockMask enum in
6991   //   /lib/Target/ARM/Utils/ARMBaseInfo.h
6992   if (Mnemonic == "it" || Mnemonic.starts_with("vpt") ||
6993       Mnemonic.starts_with("vpst")) {
6994     SMLoc Loc = Mnemonic == "it"  ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
6995                 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
6996                                     SMLoc::getFromPointer(NameLoc.getPointer() + 4);
6997     if (ITMask.size() > 3) {
6998       if (Mnemonic == "it")
6999         return Error(Loc, "too many conditions on IT instruction");
7000       return Error(Loc, "too many conditions on VPT instruction");
7001     }
7002     unsigned Mask = 8;
7003     for (char Pos : llvm::reverse(ITMask)) {
7004       if (Pos != 't' && Pos != 'e') {
7005         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
7006       }
7007       Mask >>= 1;
7008       if (Pos == 'e')
7009         Mask |= 8;
7010     }
7011     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7012   }
7013 
7014   // FIXME: This is all a pretty gross hack. We should automatically handle
7015   // optional operands like this via tblgen.
7016 
7017   // Next, add the CCOut and ConditionCode operands, if needed.
7018   //
7019   // For mnemonics which can ever incorporate a carry setting bit or predication
7020   // code, our matching model involves us always generating CCOut and
7021   // ConditionCode operands to match the mnemonic "as written" and then we let
7022   // the matcher deal with finding the right instruction or generating an
7023   // appropriate error.
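  // For example, for 'addeq r0, r1, r2' we create a cc_out operand of 0 (no
  // 's' suffix) and a condition-code operand of EQ, and let the matcher pick
  // the encoding.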
7024   bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7025   getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7026                         CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7027 
7028   // If we had a carry-set on an instruction that can't do that, issue an
7029   // error.
7030   if (!CanAcceptCarrySet && CarrySetting) {
7031     return Error(NameLoc, "instruction '" + Mnemonic +
7032                  "' can not set flags, but 's' suffix specified");
7033   }
7034   // If we had a predication code on an instruction that can't do that, issue an
7035   // error.
7036   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7037     return Error(NameLoc, "instruction '" + Mnemonic +
7038                  "' is not predicable, but condition code specified");
7039   }
7040 
7041   // If we had a VPT predication code on an instruction that can't do that, issue an
7042   // error.
7043   if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7044     return Error(NameLoc, "instruction '" + Mnemonic +
7045                  "' is not VPT predicable, but VPT code T/E is specified");
7046   }
7047 
7048   // Add the carry setting operand, if necessary.
7049   if (CanAcceptCarrySet) {
7050     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7051     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7052                                                Loc));
7053   }
7054 
7055   // Add the predication code operand, if necessary.
7056   if (CanAcceptPredicationCode) {
7057     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7058                                       CarrySetting);
7059     Operands.push_back(ARMOperand::CreateCondCode(
7060                        ARMCC::CondCodes(PredicationCode), Loc));
7061   }
7062 
7063   // Add the VPT predication code operand, if necessary.
7064   // FIXME: We don't add them for the instructions filtered below as these can
7065   // have custom operands which need special parsing.  This parsing requires
7066   // the operand to be in the same place in the OperandVector as their
7067   // definition in tblgen.  Since these instructions may also have the
7068   // scalar predication operand, we do not add the vector one here and leave
7069   // it to be fixed up later.
7070   if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" &&
7071       !Mnemonic.starts_with("vcmp") &&
7072       !(Mnemonic.starts_with("vcvt") && Mnemonic != "vcvta" &&
7073         Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7074     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7075                                       CarrySetting);
7076     Operands.push_back(ARMOperand::CreateVPTPred(
7077                          ARMVCC::VPTCodes(VPTPredicationCode), Loc));
7078   }
7079 
7080   // Add the processor imod operand, if necessary.
7081   if (ProcessorIMod) {
7082     Operands.push_back(ARMOperand::CreateImm(
7083           MCConstantExpr::create(ProcessorIMod, getContext()),
7084                                  NameLoc, NameLoc));
7085   } else if (Mnemonic == "cps" && isMClass()) {
7086     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7087   }
7088 
7089   // Add the remaining tokens in the mnemonic.
7090   while (Next != StringRef::npos) {
7091     Start = Next;
7092     Next = Name.find('.', Start + 1);
7093     ExtraToken = Name.slice(Start, Next);
7094 
7095     // Some NEON instructions have an optional datatype suffix that is
7096     // completely ignored. Check for that.
7097     if (isDataTypeToken(ExtraToken) &&
7098         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7099       continue;
7100 
7101     // For ARM mode, generate an error if the .n qualifier is used.
7102     if (ExtraToken == ".n" && !isThumb()) {
7103       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7104       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7105                    "arm mode");
7106     }
7107 
7108     // The .n qualifier is always discarded as that is what the tables
7109     // and matcher expect.  In ARM mode the .w qualifier has no effect,
7110     // so discard it to avoid errors that can be caused by the matcher.
7111     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7112       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7113       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7114     }
7115   }
7116 
7117   // Read the remaining operands.
7118   if (getLexer().isNot(AsmToken::EndOfStatement)) {
7119     // Read the first operand.
7120     if (parseOperand(Operands, Mnemonic)) {
7121       return true;
7122     }
7123 
7124     while (parseOptionalToken(AsmToken::Comma)) {
7125       // Parse and remember the operand.
7126       if (parseOperand(Operands, Mnemonic)) {
7127         return true;
7128       }
7129     }
7130   }
7131 
7132   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7133     return true;
7134 
7135   tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
7136 
7137   if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7138     // Dual-register instructions use even-odd register pairs as their
7139     // destination operand; in assembly such a pair is spelled as two
7140     // consecutive registers, without any special syntax. CDEConvertDualRegOperand
7141     // tries to convert such an operand into a register pair, e.g. r2, r3 -> r2_r3.
7142     // It returns true, if an error message has been emitted. If the function
7143     // returns false, the function either succeeded or an error (e.g. missing
7144     // operand) will be diagnosed elsewhere.
7145     if (MS.isCDEDualRegInstr(Mnemonic)) {
7146       bool GotError = CDEConvertDualRegOperand(Mnemonic, Operands);
7147       if (GotError)
7148         return GotError;
7149     }
7150   }
7151 
7152   // Some instructions, mostly Thumb, have forms for the same mnemonic that
7153   // do and don't have a cc_out optional-def operand. With some spot-checks
7154   // of the operand list, we can figure out which variant we're trying to
7155   // parse and adjust accordingly before actually matching. We shouldn't ever
7156   // try to remove a cc_out operand that was explicitly set on the
7157   // mnemonic, of course (CarrySetting == true). Reason number #317 the
7158   // table driven matcher doesn't fit well with the ARM instruction set.
7159   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
7160     Operands.erase(Operands.begin() + 1);
7161 
7162   // Some instructions have the same mnemonic, but don't always
7163   // have a predicate. Distinguish them here and delete the
7164   // appropriate predicate if needed.  This could be either the scalar
7165   // predication code or the vector predication code.
7166   if (PredicationCode == ARMCC::AL &&
7167       shouldOmitPredicateOperand(Mnemonic, Operands))
7168     Operands.erase(Operands.begin() + 1);
7169 
7170 
7171   if (hasMVE()) {
7172     if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
7173         Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
7174       // Very nasty hack to deal with the vector predicated variant of vmovlt
7175       // the scalar predicated vmov with condition 'lt'.  We can not tell them
7176       // apart until we have parsed their operands.
7177       Operands.erase(Operands.begin() + 1);
7178       Operands.erase(Operands.begin());
7179       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7180       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7181                                          Mnemonic.size() - 1 + CarrySetting);
7182       Operands.insert(Operands.begin(),
7183                       ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
7184       Operands.insert(Operands.begin(),
7185                       ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
7186     } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7187                !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7188       // Another nasty hack to deal with the ambiguity between vcvt with scalar
7189       // predication 'ne' and vcvtn with vector predication 'e'.  As above we
7190       // can only distinguish between the two after we have parsed their
7191       // operands.
7192       Operands.erase(Operands.begin() + 1);
7193       Operands.erase(Operands.begin());
7194       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7195       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7196                                          Mnemonic.size() - 1 + CarrySetting);
7197       Operands.insert(Operands.begin(),
7198                       ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
7199       Operands.insert(Operands.begin(),
7200                       ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
7201     } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7202                !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7203       // Another hack, this time to distinguish between scalar predicated vmul
7204       // with 'lt' predication code and the vector instruction vmullt with
7205       // vector predication code "none"
7206       Operands.erase(Operands.begin() + 1);
7207       Operands.erase(Operands.begin());
7208       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7209       Operands.insert(Operands.begin(),
7210                       ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
7211     }
7212     // For vmov and vcmp, as mentioned earlier, we did not add the vector
7213     // predication code, since these may contain operands that require
7214     // special parsing.  So now we have to see if they require vector
7215     // predication and replace the scalar one with the vector predication
7216     // operand if that is the case.
7217     else if (Mnemonic == "vmov" || Mnemonic.starts_with("vcmp") ||
7218              (Mnemonic.starts_with("vcvt") && !Mnemonic.starts_with("vcvta") &&
7219               !Mnemonic.starts_with("vcvtn") &&
7220               !Mnemonic.starts_with("vcvtp") &&
7221               !Mnemonic.starts_with("vcvtm"))) {
7222       if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7223         // We could not split the vector predicate off vcvt because it might
7224         // have been the scalar vcvtt instruction.  Now that we know it's a
7225         // vector instruction, we still need to check whether it's the vector
7226         // predicated vcvt with 'Then' predication or the vector vcvtt.  We can
7227         // distinguish the two based on the suffixes, if it is any of
7228         // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
7229         if (Mnemonic.starts_with("vcvtt") && Operands.size() >= 4) {
7230           auto Sz1 = static_cast<ARMOperand &>(*Operands[2]);
7231           auto Sz2 = static_cast<ARMOperand &>(*Operands[3]);
7232           if (!(Sz1.isToken() && Sz1.getToken().starts_with(".f") &&
7233                 Sz2.isToken() && Sz2.getToken().starts_with(".f"))) {
7234             Operands.erase(Operands.begin());
7235             SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7236             VPTPredicationCode = ARMVCC::Then;
7237 
7238             Mnemonic = Mnemonic.substr(0, 4);
7239             Operands.insert(Operands.begin(),
7240                             ARMOperand::CreateToken(Mnemonic, MLoc));
7241           }
7242         }
7243         Operands.erase(Operands.begin() + 1);
7244         SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7245                                           Mnemonic.size() + CarrySetting);
7246         Operands.insert(Operands.begin() + 1,
7247                         ARMOperand::CreateVPTPred(
7248                             ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
7249       }
7250     } else if (CanAcceptVPTPredicationCode) {
7251       // For all other instructions, make sure only one of the two
7252       // predication operands is left behind, depending on whether we should
7253       // use the vector predication.
7254       if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7255         if (CanAcceptPredicationCode)
7256           Operands.erase(Operands.begin() + 2);
7257         else
7258           Operands.erase(Operands.begin() + 1);
7259       } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
7260         Operands.erase(Operands.begin() + 1);
7261       }
7262     }
7263   }
7264 
7265   if (VPTPredicationCode != ARMVCC::None) {
7266     bool usedVPTPredicationCode = false;
7267     for (unsigned I = 1; I < Operands.size(); ++I)
7268       if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7269         usedVPTPredicationCode = true;
7270     if (!usedVPTPredicationCode) {
7271       // If we have a VPT predication code and we haven't just turned it
7272       // into an operand, then it was a mistake for splitMnemonic to
7273       // separate it from the rest of the mnemonic in the first place,
7274       // and this may lead to wrong disassembly (e.g. scalar floating
7275       // point VCMPE is actually a different instruction from VCMP, so
7276       // we mustn't treat them the same). In that situation, glue it
7277       // back on.
7278       Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7279       Operands.erase(Operands.begin());
7280       Operands.insert(Operands.begin(),
7281                       ARMOperand::CreateToken(Mnemonic, NameLoc));
7282     }
7283   }
7284 
7285     // ARM mode 'blx' needs special handling, as the register operand version
7286     // is predicable, but the label operand version is not. So, we can't rely
7287     // on the Mnemonic based checking to correctly figure out when to put
7288     // a k_CondCode operand in the list. If we're trying to match the label
7289     // version, remove the k_CondCode operand here.
7290     if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
7291         static_cast<ARMOperand &>(*Operands[2]).isImm())
7292       Operands.erase(Operands.begin() + 1);
7293 
7294     // Adjust operands of ldrexd/strexd to MCK_GPRPair.
7295     // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
7296     // a single GPRPair reg operand is used in the .td file to replace the two
7297     // GPRs. However, when parsing from asm, the two GPRs cannot be
7298     // automatically expressed as a GPRPair, so we have to manually merge
7299     // them.
7300     // FIXME: We would really like to be able to tablegen'erate this.
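    // For example, 'ldrexd r0, r1, [r2]' has its r0, r1 operands merged into
    // the single GPRPair operand r0_r1 before matching.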
7301     if (!isThumb() && Operands.size() > 4 &&
7302         (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
7303          Mnemonic == "stlexd")) {
7304       bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
7305       unsigned Idx = isLoad ? 2 : 3;
7306       ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
7307       ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
7308 
7309       const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
7310       // Adjust only if Op1 and Op2 are GPRs.
7311       if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
7312           MRC.contains(Op2.getReg())) {
7313         unsigned Reg1 = Op1.getReg();
7314         unsigned Reg2 = Op2.getReg();
7315         unsigned Rt = MRI->getEncodingValue(Reg1);
7316         unsigned Rt2 = MRI->getEncodingValue(Reg2);
7317 
7318         // Rt2 must be Rt + 1 and Rt must be even.
7319         if (Rt + 1 != Rt2 || (Rt & 1)) {
7320           return Error(Op2.getStartLoc(),
7321                        isLoad ? "destination operands must be sequential"
7322                               : "source operands must be sequential");
7323         }
7324         unsigned NewReg = MRI->getMatchingSuperReg(
7325             Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
7326         Operands[Idx] =
7327             ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
7328         Operands.erase(Operands.begin() + Idx + 1);
7329       }
7330   }
7331 
7332   // GNU Assembler extension (compatibility).
7333   fixupGNULDRDAlias(Mnemonic, Operands);
7334 
7335   // FIXME: As said above, this is all a pretty gross hack.  This instruction
7336   // does not fit with other "subs" and tblgen.
7337   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7338   // so the Mnemonic is the original name "subs" and delete the predicate
7339   // operand so it will match the table entry.
7340   if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
7341       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
7342       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
7343       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
7344       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
7345       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
7346     Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
7347     Operands.erase(Operands.begin() + 1);
7348   }
7349   return false;
7350 }
7351 
7352 // Validate context-sensitive operand constraints.
7353 
7354 // Return 'true' if the register list contains a register other than a low
7355 // register or the permitted HiReg, 'false' otherwise. If Reg is found in the
7356 // register list, set 'containsReg' to true.
7357 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7358                                  unsigned Reg, unsigned HiReg,
7359                                  bool &containsReg) {
7360   containsReg = false;
7361   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7362     unsigned OpReg = Inst.getOperand(i).getReg();
7363     if (OpReg == Reg)
7364       containsReg = true;
7365     // Anything other than a low register isn't legal here.
7366     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7367       return true;
7368   }
7369   return false;
7370 }
7371 
7372 // Check if the specified register is in the register list of the inst,
7373 // starting at the indicated operand number.
7374 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
7375   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7376     unsigned OpReg = Inst.getOperand(i).getReg();
7377     if (OpReg == Reg)
7378       return true;
7379   }
7380   return false;
7381 }
7382 
7383 // Return true if instruction has the interesting property of being
7384 // allowed in IT blocks, but not being predicable.
7385 static bool instIsBreakpoint(const MCInst &Inst) {
7386     return Inst.getOpcode() == ARM::tBKPT ||
7387            Inst.getOpcode() == ARM::BKPT ||
7388            Inst.getOpcode() == ARM::tHLT ||
7389            Inst.getOpcode() == ARM::HLT;
7390 }
7391 
7392 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7393                                        const OperandVector &Operands,
7394                                        unsigned ListNo, bool IsARPop) {
7395   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7396   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7397 
7398   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7399   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
7400   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7401 
7402   if (!IsARPop && ListContainsSP)
7403     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7404                  "SP may not be in the register list");
7405   else if (ListContainsPC && ListContainsLR)
7406     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7407                  "PC and LR may not be in the register list simultaneously");
7408   return false;
7409 }
7410 
7411 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7412                                        const OperandVector &Operands,
7413                                        unsigned ListNo) {
7414   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7415   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7416 
7417   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7418   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7419 
7420   if (ListContainsSP && ListContainsPC)
7421     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7422                  "SP and PC may not be in the register list");
7423   else if (ListContainsSP)
7424     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7425                  "SP may not be in the register list");
7426   else if (ListContainsPC)
7427     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7428                  "PC may not be in the register list");
7429   return false;
7430 }
7431 
7432 bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
7433                                     const OperandVector &Operands,
7434                                     bool Load, bool ARMMode, bool Writeback) {
7435   unsigned RtIndex = Load || !Writeback ? 0 : 1;
7436   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7437   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7438 
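  // In ARM mode the transfer registers must form an even/odd sequential pair:
  // e.g. 'ldrd r0, r1, [r2]' is accepted, but 'ldrd r1, r2, [r3]' is rejected
  // by the checks below.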
7439   if (ARMMode) {
7440     // Rt can't be R14.
7441     if (Rt == 14)
7442       return Error(Operands[3]->getStartLoc(),
7443                   "Rt can't be R14");
7444 
7445     // Rt must be even-numbered.
7446     if ((Rt & 1) == 1)
7447       return Error(Operands[3]->getStartLoc(),
7448                    "Rt must be even-numbered");
7449 
7450     // Rt2 must be Rt + 1.
7451     if (Rt2 != Rt + 1) {
7452       if (Load)
7453         return Error(Operands[3]->getStartLoc(),
7454                      "destination operands must be sequential");
7455       else
7456         return Error(Operands[3]->getStartLoc(),
7457                      "source operands must be sequential");
7458     }
7459 
7460     // FIXME: Diagnose m == 15
7461     // FIXME: Diagnose ldrd with m == t || m == t2.
7462   }
7463 
7464   if (!ARMMode && Load) {
7465     if (Rt2 == Rt)
7466       return Error(Operands[3]->getStartLoc(),
7467                    "destination operands can't be identical");
7468   }
7469 
7470   if (Writeback) {
7471     unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7472 
7473     if (Rn == Rt || Rn == Rt2) {
7474       if (Load)
7475         return Error(Operands[3]->getStartLoc(),
7476                      "base register needs to be different from destination "
7477                      "registers");
7478       else
7479         return Error(Operands[3]->getStartLoc(),
7480                      "source register and base register can't be identical");
7481     }
7482 
7483     // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7484     // (Except the immediate form of ldrd?)
7485   }
7486 
7487   return false;
7488 }
7489 
7490 static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
7491   for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7492     if (ARM::isVpred(MCID.operands()[i].OperandType))
7493       return i;
7494   }
7495   return -1;
7496 }
7497 
7498 static bool isVectorPredicable(const MCInstrDesc &MCID) {
7499   return findFirstVectorPredOperandIdx(MCID) != -1;
7500 }
7501 
7502 static bool isARMMCExpr(MCParsedAsmOperand &MCOp) {
7503   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
7504   if (!Op.isImm())
7505     return false;
7506   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7507   if (CE)
7508     return false;
7509   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7510   if (!E)
7511     return false;
7512   return true;
7513 }
7514 
7515 // FIXME: We would really like to be able to tablegen'erate this.
7516 bool ARMAsmParser::validateInstruction(MCInst &Inst,
7517                                        const OperandVector &Operands) {
7518   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7519   SMLoc Loc = Operands[0]->getStartLoc();
7520 
7521   // Check the IT block state first.
7522   // NOTE: BKPT and HLT instructions have the interesting property of being
7523   // allowed in IT blocks, but not being predicable. They just always execute.
7524   if (inITBlock() && !instIsBreakpoint(Inst)) {
7525     // The instruction must be predicable.
7526     if (!MCID.isPredicable())
7527       return Error(Loc, "instructions in IT block must be predicable");
7528     ARMCC::CondCodes Cond = ARMCC::CondCodes(
7529         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
7530     if (Cond != currentITCond()) {
7531       // Find the condition code Operand to get its SMLoc information.
7532       SMLoc CondLoc;
7533       for (unsigned I = 1; I < Operands.size(); ++I)
7534         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7535           CondLoc = Operands[I]->getStartLoc();
7536       return Error(CondLoc, "incorrect condition in IT block; got '" +
7537                                 StringRef(ARMCondCodeToString(Cond)) +
7538                                 "', but expected '" +
7539                                 ARMCondCodeToString(currentITCond()) + "'");
7540     }
7541   // Check for non-'al' condition codes outside of the IT block.
7542   } else if (isThumbTwo() && MCID.isPredicable() &&
7543              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7544              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7545              Inst.getOpcode() != ARM::t2Bcc &&
7546              Inst.getOpcode() != ARM::t2BFic) {
7547     return Error(Loc, "predicated instructions must be in IT block");
7548   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7549              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7550                  ARMCC::AL) {
7551     return Warning(Loc, "predicated instructions should be in IT block");
7552   } else if (!MCID.isPredicable()) {
7553     // Check the instruction doesn't have a predicate operand anyway
7554     // that it's not allowed to use. Sometimes this happens in order
7555     // to keep instructions the same shape even though one cannot
7556     // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7557     for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7558       if (MCID.operands()[i].isPredicate()) {
7559         if (Inst.getOperand(i).getImm() != ARMCC::AL)
7560           return Error(Loc, "instruction is not predicable");
7561         break;
7562       }
7563     }
7564   }
7565 
7566   // PC-setting instructions in an IT block, but not the last instruction of
7567   // the block, are UNPREDICTABLE.
7568   if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7569     return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
7570   }
7571 
7572   if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7573     unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7574     if (!isVectorPredicable(MCID))
7575       return Error(Loc, "instruction in VPT block must be predicable");
7576     unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7577     unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7578     if (Pred != VPTPred) {
7579       SMLoc PredLoc;
7580       for (unsigned I = 1; I < Operands.size(); ++I)
7581         if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7582           PredLoc = Operands[I]->getStartLoc();
7583       return Error(PredLoc, "incorrect predication in VPT block; got '" +
7584                    StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
7585                    "', but expected '" +
7586                    ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7587     }
7588   }
7589   else if (isVectorPredicable(MCID) &&
7590            Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7591            ARMVCC::None)
7592     return Error(Loc, "VPT predicated instructions must be in VPT block");
7593 
7594   const unsigned Opcode = Inst.getOpcode();
7595   switch (Opcode) {
7596   case ARM::t2IT: {
7597     // Encoding is unpredictable if it ever results in a notional 'NV'
7598     // predicate. Since we don't parse 'NV' directly this means an 'AL'
7599     // predicate with an "else" mask bit.
7600     unsigned Cond = Inst.getOperand(0).getImm();
7601     unsigned Mask = Inst.getOperand(1).getImm();
7602 
7603     // Conditions only allowing a 't' are those with no set bit except
7604     // the lowest-order one that indicates the end of the sequence. In
7605     // other words, powers of 2.
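    // For example, 'ite al' would require the notional 'NV' predicate for the
    // 'e' lane and is therefore rejected here.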
7606     if (Cond == ARMCC::AL && llvm::popcount(Mask) != 1)
7607       return Error(Loc, "unpredictable IT predicate sequence");
7608     break;
7609   }
7610   case ARM::LDRD:
7611     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7612                          /*Writeback*/false))
7613       return true;
7614     break;
7615   case ARM::LDRD_PRE:
7616   case ARM::LDRD_POST:
7617     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7618                          /*Writeback*/true))
7619       return true;
7620     break;
7621   case ARM::t2LDRDi8:
7622     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7623                          /*Writeback*/false))
7624       return true;
7625     break;
7626   case ARM::t2LDRD_PRE:
7627   case ARM::t2LDRD_POST:
7628     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7629                          /*Writeback*/true))
7630       return true;
7631     break;
7632   case ARM::t2BXJ: {
7633     const unsigned RmReg = Inst.getOperand(0).getReg();
7634     // Rm = SP is no longer unpredictable in v8-A
7635     if (RmReg == ARM::SP && !hasV8Ops())
7636       return Error(Operands[2]->getStartLoc(),
7637                    "r13 (SP) is an unpredictable operand to BXJ");
7638     return false;
7639   }
7640   case ARM::STRD:
7641     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7642                          /*Writeback*/false))
7643       return true;
7644     break;
7645   case ARM::STRD_PRE:
7646   case ARM::STRD_POST:
7647     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7648                          /*Writeback*/true))
7649       return true;
7650     break;
7651   case ARM::t2STRD_PRE:
7652   case ARM::t2STRD_POST:
7653     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
7654                          /*Writeback*/true))
7655       return true;
7656     break;
7657   case ARM::STR_PRE_IMM:
7658   case ARM::STR_PRE_REG:
7659   case ARM::t2STR_PRE:
7660   case ARM::STR_POST_IMM:
7661   case ARM::STR_POST_REG:
7662   case ARM::t2STR_POST:
7663   case ARM::STRH_PRE:
7664   case ARM::t2STRH_PRE:
7665   case ARM::STRH_POST:
7666   case ARM::t2STRH_POST:
7667   case ARM::STRB_PRE_IMM:
7668   case ARM::STRB_PRE_REG:
7669   case ARM::t2STRB_PRE:
7670   case ARM::STRB_POST_IMM:
7671   case ARM::STRB_POST_REG:
7672   case ARM::t2STRB_POST: {
7673     // Rt must be different from Rn.
7674     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7675     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7676 
7677     if (Rt == Rn)
7678       return Error(Operands[3]->getStartLoc(),
7679                    "source register and base register can't be identical");
7680     return false;
7681   }
7682   case ARM::t2LDR_PRE_imm:
7683   case ARM::t2LDR_POST_imm:
7684   case ARM::t2STR_PRE_imm:
7685   case ARM::t2STR_POST_imm: {
7686     // Rt must be different from Rn.
7687     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7688     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7689 
7690     if (Rt == Rn)
7691       return Error(Operands[3]->getStartLoc(),
7692                    "destination register and base register can't be identical");
7693     if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7694         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7695       int Imm = Inst.getOperand(2).getImm();
7696       if (Imm > 255 || Imm < -255)
7697         return Error(Operands[5]->getStartLoc(),
7698                      "operand must be in range [-255, 255]");
7699     }
7700     if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7701         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7702       if (Inst.getOperand(0).getReg() == ARM::PC) {
7703         return Error(Operands[3]->getStartLoc(),
7704                      "operand must be a register in range [r0, r14]");
7705       }
7706     }
7707     return false;
7708   }
7709 
7710   case ARM::t2LDRB_OFFSET_imm:
7711   case ARM::t2LDRB_PRE_imm:
7712   case ARM::t2LDRB_POST_imm:
7713   case ARM::t2STRB_OFFSET_imm:
7714   case ARM::t2STRB_PRE_imm:
7715   case ARM::t2STRB_POST_imm: {
7716     if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7717         Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7718         Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7719         Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7720       int Imm = Inst.getOperand(2).getImm();
7721       if (Imm > 255 || Imm < -255)
7722         return Error(Operands[5]->getStartLoc(),
7723                      "operand must be in range [-255, 255]");
7724     } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7725                Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7726       int Imm = Inst.getOperand(2).getImm();
7727       if (Imm > 0 || Imm < -255)
7728         return Error(Operands[5]->getStartLoc(),
7729                      "operand must be in range [0, 255] with a negative sign");
7730     }
7731     if (Inst.getOperand(0).getReg() == ARM::PC) {
7732       return Error(Operands[3]->getStartLoc(),
7733                    "if operand is PC, should call the LDRB (literal)");
7734     }
7735     return false;
7736   }
7737 
7738   case ARM::t2LDRH_OFFSET_imm:
7739   case ARM::t2LDRH_PRE_imm:
7740   case ARM::t2LDRH_POST_imm:
7741   case ARM::t2STRH_OFFSET_imm:
7742   case ARM::t2STRH_PRE_imm:
7743   case ARM::t2STRH_POST_imm: {
7744     if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7745         Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7746         Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7747         Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7748       int Imm = Inst.getOperand(2).getImm();
7749       if (Imm > 255 || Imm < -255)
7750         return Error(Operands[5]->getStartLoc(),
7751                      "operand must be in range [-255, 255]");
7752     } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7753                Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7754       int Imm = Inst.getOperand(2).getImm();
7755       if (Imm > 0 || Imm < -255)
7756         return Error(Operands[5]->getStartLoc(),
7757                      "operand must be in range [0, 255] with a negative sign");
7758     }
7759     if (Inst.getOperand(0).getReg() == ARM::PC) {
7760       return Error(Operands[3]->getStartLoc(),
7761                    "if operand is PC, should call the LDRH (literal)");
7762     }
7763     return false;
7764   }
7765 
7766   case ARM::t2LDRSB_OFFSET_imm:
7767   case ARM::t2LDRSB_PRE_imm:
7768   case ARM::t2LDRSB_POST_imm: {
7769     if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7770         Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7771       int Imm = Inst.getOperand(2).getImm();
7772       if (Imm > 255 || Imm < -255)
7773         return Error(Operands[5]->getStartLoc(),
7774                      "operand must be in range [-255, 255]");
7775     } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7776       int Imm = Inst.getOperand(2).getImm();
7777       if (Imm > 0 || Imm < -255)
7778         return Error(Operands[5]->getStartLoc(),
7779                      "operand must be in range [0, 255] with a negative sign");
7780     }
7781     if (Inst.getOperand(0).getReg() == ARM::PC) {
7782       return Error(Operands[3]->getStartLoc(),
7783                    "if operand is PC, should call the LDRSB (literal)");
7784     }
7785     return false;
7786   }
7787 
7788   case ARM::t2LDRSH_OFFSET_imm:
7789   case ARM::t2LDRSH_PRE_imm:
7790   case ARM::t2LDRSH_POST_imm: {
7791     if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7792         Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7793       int Imm = Inst.getOperand(2).getImm();
7794       if (Imm > 255 || Imm < -255)
7795         return Error(Operands[5]->getStartLoc(),
7796                      "operand must be in range [-255, 255]");
7797     } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7798       int Imm = Inst.getOperand(2).getImm();
7799       if (Imm > 0 || Imm < -255)
7800         return Error(Operands[5]->getStartLoc(),
7801                      "operand must be in range [0, 255] with a negative sign");
7802     }
7803     if (Inst.getOperand(0).getReg() == ARM::PC) {
7804       return Error(Operands[3]->getStartLoc(),
7805                    "if operand is PC, should call the LDRSH (literal)");
7806     }
7807     return false;
7808   }
7809 
7810   case ARM::LDR_PRE_IMM:
7811   case ARM::LDR_PRE_REG:
7812   case ARM::t2LDR_PRE:
7813   case ARM::LDR_POST_IMM:
7814   case ARM::LDR_POST_REG:
7815   case ARM::t2LDR_POST:
7816   case ARM::LDRH_PRE:
7817   case ARM::t2LDRH_PRE:
7818   case ARM::LDRH_POST:
7819   case ARM::t2LDRH_POST:
7820   case ARM::LDRSH_PRE:
7821   case ARM::t2LDRSH_PRE:
7822   case ARM::LDRSH_POST:
7823   case ARM::t2LDRSH_POST:
7824   case ARM::LDRB_PRE_IMM:
7825   case ARM::LDRB_PRE_REG:
7826   case ARM::t2LDRB_PRE:
7827   case ARM::LDRB_POST_IMM:
7828   case ARM::LDRB_POST_REG:
7829   case ARM::t2LDRB_POST:
7830   case ARM::LDRSB_PRE:
7831   case ARM::t2LDRSB_PRE:
7832   case ARM::LDRSB_POST:
7833   case ARM::t2LDRSB_POST: {
7834     // Rt must be different from Rn.
7835     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7836     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7837 
7838     if (Rt == Rn)
7839       return Error(Operands[3]->getStartLoc(),
7840                    "destination register and base register can't be identical");
7841     return false;
7842   }
7843 
7844   case ARM::MVE_VLDRBU8_rq:
7845   case ARM::MVE_VLDRBU16_rq:
7846   case ARM::MVE_VLDRBS16_rq:
7847   case ARM::MVE_VLDRBU32_rq:
7848   case ARM::MVE_VLDRBS32_rq:
7849   case ARM::MVE_VLDRHU16_rq:
7850   case ARM::MVE_VLDRHU16_rq_u:
7851   case ARM::MVE_VLDRHU32_rq:
7852   case ARM::MVE_VLDRHU32_rq_u:
7853   case ARM::MVE_VLDRHS32_rq:
7854   case ARM::MVE_VLDRHS32_rq_u:
7855   case ARM::MVE_VLDRWU32_rq:
7856   case ARM::MVE_VLDRWU32_rq_u:
7857   case ARM::MVE_VLDRDU64_rq:
7858   case ARM::MVE_VLDRDU64_rq_u:
7859   case ARM::MVE_VLDRWU32_qi:
7860   case ARM::MVE_VLDRWU32_qi_pre:
7861   case ARM::MVE_VLDRDU64_qi:
7862   case ARM::MVE_VLDRDU64_qi_pre: {
7863     // Qd must be different from Qm.
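    // For example, 'vldrw.u32 q0, [q0, #8]' would have the destination
    // overwrite the vector of pointers it is still loading through, so the
    // overlap is rejected below.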
7864     unsigned QdIdx = 0, QmIdx = 2;
7865     bool QmIsPointer = false;
7866     switch (Opcode) {
7867     case ARM::MVE_VLDRWU32_qi:
7868     case ARM::MVE_VLDRDU64_qi:
7869       QmIdx = 1;
7870       QmIsPointer = true;
7871       break;
7872     case ARM::MVE_VLDRWU32_qi_pre:
7873     case ARM::MVE_VLDRDU64_qi_pre:
7874       QdIdx = 1;
7875       QmIsPointer = true;
7876       break;
7877     }
7878 
7879     const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
7880     const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
7881 
7882     if (Qd == Qm) {
7883       return Error(Operands[3]->getStartLoc(),
7884                    Twine("destination vector register and vector ") +
7885                    (QmIsPointer ? "pointer" : "offset") +
7886                    " register can't be identical");
7887     }
7888     return false;
7889   }
7890 
7891   case ARM::SBFX:
7892   case ARM::t2SBFX:
7893   case ARM::UBFX:
7894   case ARM::t2UBFX: {
7895     // Width must be in range [1, 32-lsb].
7896     unsigned LSB = Inst.getOperand(2).getImm();
7897     unsigned Widthm1 = Inst.getOperand(3).getImm();
7898     if (Widthm1 >= 32 - LSB)
7899       return Error(Operands[5]->getStartLoc(),
7900                    "bitfield width must be in range [1,32-lsb]");
7901     return false;
7902   }
7903   // Notionally handles ARM::tLDMIA_UPD too.
7904   case ARM::tLDMIA: {
7905     // If we're parsing Thumb2, the .w variant is available and handles
7906     // most cases that are normally illegal for a Thumb1 LDM instruction.
7907     // We'll make the transformation in processInstruction() if necessary.
7908     //
7909     // Thumb LDM instructions are writeback iff the base register is not
7910     // in the register list.
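    // For example, 'ldmia r0!, {r1, r2}' (base not in the list) writes back,
    // while 'ldmia r0, {r0, r1}' (base in the list) must not carry a '!'.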
7911     unsigned Rn = Inst.getOperand(0).getReg();
7912     bool HasWritebackToken =
7913         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7914          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
7915     bool ListContainsBase;
7916     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
7917       return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
7918                    "registers must be in range r0-r7");
7919     // If we should have writeback, then there should be a '!' token.
7920     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
7921       return Error(Operands[2]->getStartLoc(),
7922                    "writeback operator '!' expected");
7923     // If we should not have writeback, there must not be a '!'. This is
7924     // true even for the 32-bit wide encodings.
7925     if (ListContainsBase && HasWritebackToken)
7926       return Error(Operands[3]->getStartLoc(),
7927                    "writeback operator '!' not allowed when base register "
7928                    "in register list");
7929 
7930     if (validatetLDMRegList(Inst, Operands, 3))
7931       return true;
7932     break;
7933   }
7934   case ARM::LDMIA_UPD:
7935   case ARM::LDMDB_UPD:
7936   case ARM::LDMIB_UPD:
7937   case ARM::LDMDA_UPD:
7938     // ARM variants loading and updating the same register are only officially
7939     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
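    // For example, on v7 'ldmia r0!, {r0, r1}' both writes back r0 and loads
    // it from memory, so it is rejected below.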
7940     if (!hasV7Ops())
7941       break;
7942     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7943       return Error(Operands.back()->getStartLoc(),
7944                    "writeback register not allowed in register list");
7945     break;
7946   case ARM::t2LDMIA:
7947   case ARM::t2LDMDB:
7948     if (validatetLDMRegList(Inst, Operands, 3))
7949       return true;
7950     break;
7951   case ARM::t2STMIA:
7952   case ARM::t2STMDB:
7953     if (validatetSTMRegList(Inst, Operands, 3))
7954       return true;
7955     break;
7956   case ARM::t2LDMIA_UPD:
7957   case ARM::t2LDMDB_UPD:
7958   case ARM::t2STMIA_UPD:
7959   case ARM::t2STMDB_UPD:
7960     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7961       return Error(Operands.back()->getStartLoc(),
7962                    "writeback register not allowed in register list");
7963 
7964     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
7965       if (validatetLDMRegList(Inst, Operands, 3))
7966         return true;
7967     } else {
7968       if (validatetSTMRegList(Inst, Operands, 3))
7969         return true;
7970     }
7971     break;
7972 
7973   case ARM::sysLDMIA_UPD:
7974   case ARM::sysLDMDA_UPD:
7975   case ARM::sysLDMDB_UPD:
7976   case ARM::sysLDMIB_UPD:
7977     if (!listContainsReg(Inst, 3, ARM::PC))
7978       return Error(Operands[4]->getStartLoc(),
7979                    "writeback register only allowed on system LDM "
7980                    "if PC in register-list");
7981     break;
7982   case ARM::sysSTMIA_UPD:
7983   case ARM::sysSTMDA_UPD:
7984   case ARM::sysSTMDB_UPD:
7985   case ARM::sysSTMIB_UPD:
7986     return Error(Operands[2]->getStartLoc(),
7987                  "system STM cannot have writeback register");
7988   case ARM::tMUL:
7989     // The second source operand must be the same register as the destination
7990     // operand.
7991     //
7992     // In this case, we must directly check the parsed operands because the
7993     // cvtThumbMultiply() function is written in such a way that it guarantees
7994     // this first statement is always true for the new Inst.  Essentially, the
7995     // destination is unconditionally copied into the second source operand
7996     // without checking to see if it matches what we actually parsed.
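    // For example, 'muls r0, r1, r0' (or 'muls r0, r0, r1') is encodable as a
    // 16-bit Thumb multiply, but 'muls r0, r1, r2' is not.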
7997     if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
7998                                  ((ARMOperand &)*Operands[5]).getReg()) &&
7999         (((ARMOperand &)*Operands[3]).getReg() !=
8000          ((ARMOperand &)*Operands[4]).getReg())) {
8001       return Error(Operands[3]->getStartLoc(),
8002                    "destination register must match source register");
8003     }
8004     break;
8005 
8006   // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
8007   // so only issue a diagnostic for thumb1. The instructions will be
8008   // switched to the t2 encodings in processInstruction() if necessary.
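  // For example, in Thumb1 'push {r0, r8}' is diagnosed here, while with
  // Thumb2 available it is simply widened to the 32-bit encoding.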
8009   case ARM::tPOP: {
8010     bool ListContainsBase;
8011     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
8012         !isThumbTwo())
8013       return Error(Operands[2]->getStartLoc(),
8014                    "registers must be in range r0-r7 or pc");
8015     if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
8016       return true;
8017     break;
8018   }
8019   case ARM::tPUSH: {
8020     bool ListContainsBase;
8021     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
8022         !isThumbTwo())
8023       return Error(Operands[2]->getStartLoc(),
8024                    "registers must be in range r0-r7 or lr");
8025     if (validatetSTMRegList(Inst, Operands, 2))
8026       return true;
8027     break;
8028   }
8029   case ARM::tSTMIA_UPD: {
8030     bool ListContainsBase, InvalidLowList;
8031     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
8032                                           0, ListContainsBase);
8033     if (InvalidLowList && !isThumbTwo())
8034       return Error(Operands[4]->getStartLoc(),
8035                    "registers must be in range r0-r7");
8036 
8037     // This would be converted to a 32-bit stm, but that's not valid if the
8038     // writeback register is in the list.
8039     if (InvalidLowList && ListContainsBase)
8040       return Error(Operands[4]->getStartLoc(),
8041                    "writeback operator '!' not allowed when base register "
8042                    "in register list");
8043 
8044     if (validatetSTMRegList(Inst, Operands, 4))
8045       return true;
8046     break;
8047   }
8048   case ARM::tADDrSP:
8049     // If the non-SP source operand and the destination operand are not the
8050     // same, we need thumb2 (for the wide encoding), or we have an error.
8051     if (!isThumbTwo() &&
8052         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8053       return Error(Operands[4]->getStartLoc(),
8054                    "source register must be the same as destination");
8055     }
8056     break;
8057 
8058   case ARM::t2ADDrr:
8059   case ARM::t2ADDrs:
8060   case ARM::t2SUBrr:
8061   case ARM::t2SUBrs:
8062     if (Inst.getOperand(0).getReg() == ARM::SP &&
8063         Inst.getOperand(1).getReg() != ARM::SP)
8064       return Error(Operands[4]->getStartLoc(),
8065                    "source register must be sp if destination is sp");
8066     break;
8067 
8068   // Final range checking for Thumb unconditional branch instructions.
8069   case ARM::tB:
8070     if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
8071       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8072     break;
8073   case ARM::t2B: {
8074     int op = (Operands[2]->isImm()) ? 2 : 3;
8075     ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
8076     // Delay the checks of symbolic expressions until they are resolved.
8077     if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8078         !Operand.isSignedOffset<24, 1>())
8079       return Error(Operands[op]->getStartLoc(), "branch target out of range");
8080     break;
8081   }
8082   // Final range checking for Thumb conditional branch instructions.
8083   case ARM::tBcc:
8084     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
8085       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8086     break;
8087   case ARM::t2Bcc: {
8088     int Op = (Operands[2]->isImm()) ? 2 : 3;
8089     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8090       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8091     break;
8092   }
8093   case ARM::tCBZ:
8094   case ARM::tCBNZ: {
8095     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
8096       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8097     break;
8098   }
8099   case ARM::MOVi16:
8100   case ARM::MOVTi16:
8101   case ARM::t2MOVi16:
8102   case ARM::t2MOVTi16:
8103     {
8104     // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
8105     // especially when we turn it into a movw and the expression <symbol> does
8106     // not have a :lower16: or :upper16: as part of the expression.  We don't
8107     // want the behavior of silently truncating, which can be unexpected and
8108     // lead to bugs that are difficult to find since this is an easy mistake
8109     // to make.
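    // For example, 'movw r0, #:lower16:sym' is accepted, while a bare
    // 'movw r0, #sym' is rejected here rather than silently truncated.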
8110     int i = (Operands[3]->isImm()) ? 3 : 4;
8111     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8112     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8113     if (CE) break;
8114     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
8115     if (!E) break;
8116     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8117     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
8118                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
8119       return Error(
8120           Op.getStartLoc(),
8121           "immediate expression for mov requires :lower16: or :upper16");
8122     break;
8123   }
8124   case ARM::tADDi8: {
8125     MCParsedAsmOperand &Op = *Operands[4];
8126     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8127       return Error(Op.getStartLoc(),
8128                    "Immediate expression for Thumb adds requires :lower0_7:,"
8129                    " :lower8_15:, :upper0_7: or :upper8_15:");
8130     break;
8131   }
8132   case ARM::tMOVi8: {
8133     MCParsedAsmOperand &Op = *Operands[2];
8134     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8135       return Error(Op.getStartLoc(),
8136                    "Immediate expression for Thumb movs requires :lower0_7:,"
8137                    " :lower8_15:, :upper0_7: or :upper8_15:");
8138     break;
8139   }
8140   case ARM::HINT:
8141   case ARM::t2HINT: {
8142     unsigned Imm8 = Inst.getOperand(0).getImm();
8143     unsigned Pred = Inst.getOperand(1).getImm();
8144     // ESB is not predicable (pred must be AL). Without the RAS extension, this
8145     // behaves as any other unallocated hint.
8146     if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8147       return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
8148                                                "predicable, but condition "
8149                                                "code specified");
8150     if (Imm8 == 0x14 && Pred != ARMCC::AL)
8151       return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
8152                                                "predicable, but condition "
8153                                                "code specified");
8154     break;
8155   }
8156   case ARM::t2BFi:
8157   case ARM::t2BFr:
8158   case ARM::t2BFLi:
8159   case ARM::t2BFLr: {
8160     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
8161         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8162       return Error(Operands[2]->getStartLoc(),
8163                    "branch location out of range or not a multiple of 2");
8164 
8165     if (Opcode == ARM::t2BFi) {
8166       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
8167         return Error(Operands[3]->getStartLoc(),
8168                      "branch target out of range or not a multiple of 2");
8169     } else if (Opcode == ARM::t2BFLi) {
8170       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
8171         return Error(Operands[3]->getStartLoc(),
8172                      "branch target out of range or not a multiple of 2");
8173     }
8174     break;
8175   }
8176   case ARM::t2BFic: {
8177     if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
8178         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8179       return Error(Operands[1]->getStartLoc(),
8180                    "branch location out of range or not a multiple of 2");
8181 
8182     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
8183       return Error(Operands[2]->getStartLoc(),
8184                    "branch target out of range or not a multiple of 2");
8185 
8186     assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8187            "branch location and else branch target should either both be "
8188            "immediates or both labels");
8189 
8190     if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8191       int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8192       if (Diff != 4 && Diff != 2)
8193         return Error(
8194             Operands[3]->getStartLoc(),
8195             "else branch target must be 2 or 4 greater than the branch location");
8196     }
8197     break;
8198   }
8199   case ARM::t2CLRM: {
8200     for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8201       if (Inst.getOperand(i).isReg() &&
8202           !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8203               Inst.getOperand(i).getReg())) {
8204         return Error(Operands[2]->getStartLoc(),
8205                      "invalid register in register list. Valid registers are "
8206                      "r0-r12, lr/r14 and APSR.");
8207       }
8208     }
8209     break;
8210   }
8211   case ARM::DSB:
8212   case ARM::t2DSB: {
8213 
8214     if (Inst.getNumOperands() < 2)
8215       break;
8216 
8217     unsigned Option = Inst.getOperand(0).getImm();
8218     unsigned Pred = Inst.getOperand(1).getImm();
8219 
8220     // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8221     if (Option == 0 && Pred != ARMCC::AL)
8222       return Error(Operands[1]->getStartLoc(),
8223                    "instruction 'ssbb' is not predicable, but condition code "
8224                    "specified");
8225     if (Option == 4 && Pred != ARMCC::AL)
8226       return Error(Operands[1]->getStartLoc(),
8227                    "instruction 'pssbb' is not predicable, but condition code "
8228                    "specified");
8229     break;
8230   }
8231   case ARM::VMOVRRS: {
8232     // Source registers must be sequential.
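    // For example, 'vmov r0, r1, s2, s3' is valid, but 'vmov r0, r1, s2, s4'
    // is not.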
8233     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8234     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8235     if (Sm1 != Sm + 1)
8236       return Error(Operands[5]->getStartLoc(),
8237                    "source operands must be sequential");
8238     break;
8239   }
8240   case ARM::VMOVSRR: {
8241     // Destination registers must be sequential.
8242     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8243     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8244     if (Sm1 != Sm + 1)
8245       return Error(Operands[3]->getStartLoc(),
8246                    "destination operands must be sequential");
8247     break;
8248   }
8249   case ARM::VLDMDIA:
8250   case ARM::VSTMDIA: {
8251     ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
8252     auto &RegList = Op.getRegList();
8253     if (RegList.size() < 1 || RegList.size() > 16)
8254       return Error(Operands[3]->getStartLoc(),
8255                    "list of registers must be at least 1 and at most 16");
8256     break;
8257   }
8258   case ARM::MVE_VQDMULLs32bh:
8259   case ARM::MVE_VQDMULLs32th:
8260   case ARM::MVE_VCMULf32:
8261   case ARM::MVE_VMULLBs32:
8262   case ARM::MVE_VMULLTs32:
8263   case ARM::MVE_VMULLBu32:
8264   case ARM::MVE_VMULLTu32: {
8265     if (Operands[3]->getReg() == Operands[4]->getReg()) {
8266       return Error (Operands[3]->getStartLoc(),
8267                     "Qd register and Qn register can't be identical");
8268     }
8269     if (Operands[3]->getReg() == Operands[5]->getReg()) {
8270       return Error (Operands[3]->getStartLoc(),
8271                     "Qd register and Qm register can't be identical");
8272     }
8273     break;
8274   }
8275   case ARM::MVE_VREV64_8:
8276   case ARM::MVE_VREV64_16:
8277   case ARM::MVE_VREV64_32:
8278   case ARM::MVE_VQDMULL_qr_s32bh:
8279   case ARM::MVE_VQDMULL_qr_s32th: {
8280     if (Operands[3]->getReg() == Operands[4]->getReg()) {
8281       return Error (Operands[3]->getStartLoc(),
8282                     "Qd register and Qn register can't be identical");
8283     }
8284     break;
8285   }
8286   case ARM::MVE_VCADDi32:
8287   case ARM::MVE_VCADDf32:
8288   case ARM::MVE_VHCADDs32: {
8289     if (Operands[3]->getReg() == Operands[5]->getReg()) {
8290       return Error (Operands[3]->getStartLoc(),
8291                     "Qd register and Qm register can't be identical");
8292     }
8293     break;
8294   }
8295   case ARM::MVE_VMOV_rr_q: {
8296     if (Operands[4]->getReg() != Operands[6]->getReg())
8297       return Error (Operands[4]->getStartLoc(), "Q-registers must be the same");
8298     if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
8299         static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
8300       return Error (Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8301     break;
8302   }
8303   case ARM::MVE_VMOV_q_rr: {
8304     if (Operands[2]->getReg() != Operands[4]->getReg())
8305       return Error (Operands[2]->getStartLoc(), "Q-registers must be the same");
8306     if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
8307         static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
8308       return Error (Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8309     break;
8310   }
8311   case ARM::MVE_SQRSHR:
8312   case ARM::MVE_UQRSHL: {
8313     if (Operands[2]->getReg() == Operands[3]->getReg()) {
8314       return Error(Operands[2]->getStartLoc(),
8315                    "Rda register and Rm register can't be identical");
8316     }
8317     break;
8318   }
8319   case ARM::UMAAL:
8320   case ARM::UMLAL:
8321   case ARM::UMULL:
8322   case ARM::t2UMAAL:
8323   case ARM::t2UMLAL:
8324   case ARM::t2UMULL:
8325   case ARM::SMLAL:
8326   case ARM::SMLALBB:
8327   case ARM::SMLALBT:
8328   case ARM::SMLALD:
8329   case ARM::SMLALDX:
8330   case ARM::SMLALTB:
8331   case ARM::SMLALTT:
8332   case ARM::SMLSLD:
8333   case ARM::SMLSLDX:
8334   case ARM::SMULL:
8335   case ARM::t2SMLAL:
8336   case ARM::t2SMLALBB:
8337   case ARM::t2SMLALBT:
8338   case ARM::t2SMLALD:
8339   case ARM::t2SMLALDX:
8340   case ARM::t2SMLALTB:
8341   case ARM::t2SMLALTT:
8342   case ARM::t2SMLSLD:
8343   case ARM::t2SMLSLDX:
8344   case ARM::t2SMULL: {
8345     unsigned RdHi = Inst.getOperand(0).getReg();
8346     unsigned RdLo = Inst.getOperand(1).getReg();
8347     if(RdHi == RdLo) {
8348       return Error(Loc,
8349                    "unpredictable instruction, RdHi and RdLo must be different");
8350     }
8351     break;
8352   }
8353 
8354   case ARM::CDE_CX1:
8355   case ARM::CDE_CX1A:
8356   case ARM::CDE_CX1D:
8357   case ARM::CDE_CX1DA:
8358   case ARM::CDE_CX2:
8359   case ARM::CDE_CX2A:
8360   case ARM::CDE_CX2D:
8361   case ARM::CDE_CX2DA:
8362   case ARM::CDE_CX3:
8363   case ARM::CDE_CX3A:
8364   case ARM::CDE_CX3D:
8365   case ARM::CDE_CX3DA:
8366   case ARM::CDE_VCX1_vec:
8367   case ARM::CDE_VCX1_fpsp:
8368   case ARM::CDE_VCX1_fpdp:
8369   case ARM::CDE_VCX1A_vec:
8370   case ARM::CDE_VCX1A_fpsp:
8371   case ARM::CDE_VCX1A_fpdp:
8372   case ARM::CDE_VCX2_vec:
8373   case ARM::CDE_VCX2_fpsp:
8374   case ARM::CDE_VCX2_fpdp:
8375   case ARM::CDE_VCX2A_vec:
8376   case ARM::CDE_VCX2A_fpsp:
8377   case ARM::CDE_VCX2A_fpdp:
8378   case ARM::CDE_VCX3_vec:
8379   case ARM::CDE_VCX3_fpsp:
8380   case ARM::CDE_VCX3_fpdp:
8381   case ARM::CDE_VCX3A_vec:
8382   case ARM::CDE_VCX3A_fpsp:
8383   case ARM::CDE_VCX3A_fpdp: {
8384     assert(Inst.getOperand(1).isImm() &&
8385            "CDE operand 1 must be a coprocessor ID");
8386     int64_t Coproc = Inst.getOperand(1).getImm();
8387     if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8388       return Error(Operands[1]->getStartLoc(),
8389                    "coprocessor must be configured as CDE");
8390     else if (Coproc >= 8)
8391       return Error(Operands[1]->getStartLoc(),
8392                    "coprocessor must be in the range [p0, p7]");
8393     break;
8394   }
8395 
8396   case ARM::t2CDP:
8397   case ARM::t2CDP2:
8398   case ARM::t2LDC2L_OFFSET:
8399   case ARM::t2LDC2L_OPTION:
8400   case ARM::t2LDC2L_POST:
8401   case ARM::t2LDC2L_PRE:
8402   case ARM::t2LDC2_OFFSET:
8403   case ARM::t2LDC2_OPTION:
8404   case ARM::t2LDC2_POST:
8405   case ARM::t2LDC2_PRE:
8406   case ARM::t2LDCL_OFFSET:
8407   case ARM::t2LDCL_OPTION:
8408   case ARM::t2LDCL_POST:
8409   case ARM::t2LDCL_PRE:
8410   case ARM::t2LDC_OFFSET:
8411   case ARM::t2LDC_OPTION:
8412   case ARM::t2LDC_POST:
8413   case ARM::t2LDC_PRE:
8414   case ARM::t2MCR:
8415   case ARM::t2MCR2:
8416   case ARM::t2MCRR:
8417   case ARM::t2MCRR2:
8418   case ARM::t2MRC:
8419   case ARM::t2MRC2:
8420   case ARM::t2MRRC:
8421   case ARM::t2MRRC2:
8422   case ARM::t2STC2L_OFFSET:
8423   case ARM::t2STC2L_OPTION:
8424   case ARM::t2STC2L_POST:
8425   case ARM::t2STC2L_PRE:
8426   case ARM::t2STC2_OFFSET:
8427   case ARM::t2STC2_OPTION:
8428   case ARM::t2STC2_POST:
8429   case ARM::t2STC2_PRE:
8430   case ARM::t2STCL_OFFSET:
8431   case ARM::t2STCL_OPTION:
8432   case ARM::t2STCL_POST:
8433   case ARM::t2STCL_PRE:
8434   case ARM::t2STC_OFFSET:
8435   case ARM::t2STC_OPTION:
8436   case ARM::t2STC_POST:
8437   case ARM::t2STC_PRE: {
8438     unsigned Opcode = Inst.getOpcode();
8439     // Inst.getOperand indexes operands in the (outs ...) and (ins ...) dags;
8440     // CopInd is the index of the coprocessor operand.
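    // (t2MRC/t2MRC2 list Rt before the coprocessor and t2MRRC/t2MRRC2 list Rt
    // and Rt2 first, hence the non-zero indices below.)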
8441     size_t CopInd = 0;
8442     if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8443       CopInd = 2;
8444     else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8445       CopInd = 1;
8446     assert(Inst.getOperand(CopInd).isImm() &&
8447            "Operand must be a coprocessor ID");
8448     int64_t Coproc = Inst.getOperand(CopInd).getImm();
8449     // Operands[2] is the coprocessor operand at the syntactic level.
8450     if (ARM::isCDECoproc(Coproc, *STI))
8451       return Error(Operands[2]->getStartLoc(),
8452                    "coprocessor must be configured as GCP");
8453     break;
8454   }
8455   }
8456 
8457   return false;
8458 }
8459 
8460 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
8461   switch(Opc) {
8462   default: llvm_unreachable("unexpected opcode!");
8463   // VST1LN
8464   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
8465   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8466   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8467   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
8468   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8469   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8470   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
8471   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
8472   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
8473 
8474   // VST2LN
8475   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
8476   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8477   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8478   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8479   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8480 
8481   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
8482   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8483   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8484   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8485   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8486 
8487   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
8488   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
8489   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
8490   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
8491   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
8492 
8493   // VST3LN
8494   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
8495   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8496   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8497   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
8498   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8499   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
8500   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8501   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8502   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
8503   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8504   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
8505   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
8506   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
8507   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
8508   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
8509 
8510   // VST3
8511   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
8512   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8513   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8514   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
8515   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8516   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8517   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
8518   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8519   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8520   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
8521   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8522   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8523   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
8524   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
8525   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
8526   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
8527   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
8528   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
8529 
8530   // VST4LN
8531   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
8532   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8533   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8534   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
8535   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8536   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
8537   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8538   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8539   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
8540   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8541   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
8542   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
8543   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
8544   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
8545   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
8546 
8547   // VST4
8548   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
8549   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8550   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8551   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
8552   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8553   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8554   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
8555   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8556   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8557   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
8558   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8559   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8560   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
8561   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
8562   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
8563   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
8564   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
8565   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
8566   }
8567 }
8568 
8569 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
8570   switch(Opc) {
8571   default: llvm_unreachable("unexpected opcode!");
8572   // VLD1LN
8573   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
8574   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8575   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8576   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
8577   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8578   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8579   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
8580   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
8581   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
8582 
8583   // VLD2LN
8584   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
8585   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8586   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8587   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
8588   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8589   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
8590   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8591   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8592   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
8593   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8594   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
8595   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
8596   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
8597   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
8598   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
8599 
8600   // VLD3DUP
8601   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
8602   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8603   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8604   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
8605   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8606   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8607   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
8608   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8609   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8610   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
8611   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8612   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8613   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
8614   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
8615   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
8616   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
8617   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
8618   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
8619 
8620   // VLD3LN
8621   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
8622   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8623   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8624   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
8625   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8626   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
8627   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8628   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8629   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
8630   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8631   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
8632   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
8633   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
8634   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
8635   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
8636 
8637   // VLD3
8638   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
8639   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8640   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8641   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
8642   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8643   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8644   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
8645   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8646   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8647   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
8648   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8649   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8650   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
8651   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
8652   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
8653   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
8654   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
8655   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
8656 
8657   // VLD4LN
8658   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
8659   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8660   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8661   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8662   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8663   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
8664   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8665   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8666   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8667   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8668   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
8669   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
8670   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
8671   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
8672   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
8673 
8674   // VLD4DUP
8675   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
8676   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8677   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8678   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
8679   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
8680   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8681   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
8682   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8683   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8684   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
8685   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
8686   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8687   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
8688   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
8689   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
8690   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
8691   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
8692   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
8693 
8694   // VLD4
8695   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
8696   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8697   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8698   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
8699   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8700   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8701   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
8702   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8703   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8704   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
8705   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8706   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8707   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
8708   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
8709   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
8710   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
8711   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
8712   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
8713   }
8714 }
8715 
8716 bool ARMAsmParser::processInstruction(MCInst &Inst,
8717                                       const OperandVector &Operands,
8718                                       MCStreamer &Out) {
8719   // Check if we have the wide qualifier, because if it's present we
8720   // must avoid selecting a 16-bit thumb instruction.
8721   bool HasWideQualifier = false;
8722   for (auto &Op : Operands) {
8723     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8724     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8725       HasWideQualifier = true;
8726       break;
8727     }
8728   }
8729 
8730   switch (Inst.getOpcode()) {
8731   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
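  // For example, a plain 'ldrt r0, [r1]' is parsed as LDRT_POST and rewritten
  // here as LDRT_POST_IMM with a zero post-index offset.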
8732   case ARM::LDRT_POST:
8733   case ARM::LDRBT_POST: {
8734     const unsigned Opcode =
8735       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8736                                            : ARM::LDRBT_POST_IMM;
8737     MCInst TmpInst;
8738     TmpInst.setOpcode(Opcode);
8739     TmpInst.addOperand(Inst.getOperand(0));
8740     TmpInst.addOperand(Inst.getOperand(1));
8741     TmpInst.addOperand(Inst.getOperand(1));
8742     TmpInst.addOperand(MCOperand::createReg(0));
8743     TmpInst.addOperand(MCOperand::createImm(0));
8744     TmpInst.addOperand(Inst.getOperand(2));
8745     TmpInst.addOperand(Inst.getOperand(3));
8746     Inst = TmpInst;
8747     return true;
8748   }
8749   // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' with an omitted immediate.
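  // For example, 'ldrsbt r0, [r1]' parses as LDRSBTii and is rewritten here as
  // LDRSBTi, with 256 being the immediate these opcodes use to represent the
  // omitted offset.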
8750   case ARM::LDRSBTii:
8751   case ARM::LDRHTii:
8752   case ARM::LDRSHTii: {
8753     MCInst TmpInst;
8754 
8755     if (Inst.getOpcode() == ARM::LDRSBTii)
8756       TmpInst.setOpcode(ARM::LDRSBTi);
8757     else if (Inst.getOpcode() == ARM::LDRHTii)
8758       TmpInst.setOpcode(ARM::LDRHTi);
8759     else if (Inst.getOpcode() == ARM::LDRSHTii)
8760       TmpInst.setOpcode(ARM::LDRSHTi);
8761     TmpInst.addOperand(Inst.getOperand(0));
8762     TmpInst.addOperand(Inst.getOperand(1));
8763     TmpInst.addOperand(Inst.getOperand(1));
8764     TmpInst.addOperand(MCOperand::createImm(256));
8765     TmpInst.addOperand(Inst.getOperand(2));
8766     Inst = TmpInst;
8767     return true;
8768   }
8769   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8770   case ARM::STRT_POST:
8771   case ARM::STRBT_POST: {
8772     const unsigned Opcode =
8773       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8774                                            : ARM::STRBT_POST_IMM;
8775     MCInst TmpInst;
8776     TmpInst.setOpcode(Opcode);
8777     TmpInst.addOperand(Inst.getOperand(1));
8778     TmpInst.addOperand(Inst.getOperand(0));
8779     TmpInst.addOperand(Inst.getOperand(1));
8780     TmpInst.addOperand(MCOperand::createReg(0));
8781     TmpInst.addOperand(MCOperand::createImm(0));
8782     TmpInst.addOperand(Inst.getOperand(2));
8783     TmpInst.addOperand(Inst.getOperand(3));
8784     Inst = TmpInst;
8785     return true;
8786   }
8787   // Alias for alternate form of 'ADR Rd, #imm' instruction.
8788   case ARM::ADDri: {
8789     if (Inst.getOperand(1).getReg() != ARM::PC ||
8790         Inst.getOperand(5).getReg() != 0 ||
8791         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
8792       return false;
8793     MCInst TmpInst;
8794     TmpInst.setOpcode(ARM::ADR);
8795     TmpInst.addOperand(Inst.getOperand(0));
8796     if (Inst.getOperand(2).isImm()) {
8797       // The immediate (mod_imm) will be in its encoded form; we must decode it
8798       // before passing it to the ADR instruction.
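      // For example, an encoded value of 0x10F (rot field 1, imm8 0xFF)
      // decodes to rotr(0xFF, 2) = 0xC000003F.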
8799       unsigned Enc = Inst.getOperand(2).getImm();
8800       TmpInst.addOperand(MCOperand::createImm(
8801           llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8802     } else {
8803       // Turn PC-relative expression into absolute expression.
8804       // Reading PC provides the start of the current instruction + 8 and
8805       // the transform to adr is biased by that.
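      // The fixup expression built here is therefore (. + 8) + <operand>, the
      // absolute address the pc-relative operand referred to.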
8806       MCSymbol *Dot = getContext().createTempSymbol();
8807       Out.emitLabel(Dot);
8808       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
8809       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
8810                                                      MCSymbolRefExpr::VK_None,
8811                                                      getContext());
8812       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
8813       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
8814                                                      getContext());
8815       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
8816                                                         getContext());
8817       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
8818     }
8819     TmpInst.addOperand(Inst.getOperand(3));
8820     TmpInst.addOperand(Inst.getOperand(4));
8821     Inst = TmpInst;
8822     return true;
8823   }
8824   // Aliases for imm syntax of LDR instructions.
8825   case ARM::t2LDR_PRE_imm:
8826   case ARM::t2LDR_POST_imm: {
8827     MCInst TmpInst;
8828     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
8829                                                              : ARM::t2LDR_POST);
8830     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8831     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8832     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8833     TmpInst.addOperand(Inst.getOperand(2)); // imm
8834     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8835     Inst = TmpInst;
8836     return true;
8837   }
8838   // Aliases for imm syntax of STR instructions.
8839   case ARM::t2STR_PRE_imm:
8840   case ARM::t2STR_POST_imm: {
8841     MCInst TmpInst;
8842     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
8843                                                              : ARM::t2STR_POST);
8844     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8845     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8846     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8847     TmpInst.addOperand(Inst.getOperand(2)); // imm
8848     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8849     Inst = TmpInst;
8850     return true;
8851   }
8852   // Aliases for imm syntax of LDRB instructions.
8853   case ARM::t2LDRB_OFFSET_imm: {
8854     MCInst TmpInst;
8855     TmpInst.setOpcode(ARM::t2LDRBi8);
8856     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8857     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8858     TmpInst.addOperand(Inst.getOperand(2)); // imm
8859     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8860     Inst = TmpInst;
8861     return true;
8862   }
8863   case ARM::t2LDRB_PRE_imm:
8864   case ARM::t2LDRB_POST_imm: {
8865     MCInst TmpInst;
8866     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
8867                           ? ARM::t2LDRB_PRE
8868                           : ARM::t2LDRB_POST);
8869     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8870     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8871     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8872     TmpInst.addOperand(Inst.getOperand(2)); // imm
8873     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8874     Inst = TmpInst;
8875     return true;
8876   }
8877   // Aliases for imm syntax of STRB instructions.
8878   case ARM::t2STRB_OFFSET_imm: {
8879     MCInst TmpInst;
8880     TmpInst.setOpcode(ARM::t2STRBi8);
8881     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8882     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8883     TmpInst.addOperand(Inst.getOperand(2)); // imm
8884     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8885     Inst = TmpInst;
8886     return true;
8887   }
8888   case ARM::t2STRB_PRE_imm:
8889   case ARM::t2STRB_POST_imm: {
8890     MCInst TmpInst;
8891     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
8892                           ? ARM::t2STRB_PRE
8893                           : ARM::t2STRB_POST);
8894     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8895     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8896     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8897     TmpInst.addOperand(Inst.getOperand(2)); // imm
8898     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8899     Inst = TmpInst;
8900     return true;
8901   }
8902   // Aliases for imm syntax of LDRH instructions.
8903   case ARM::t2LDRH_OFFSET_imm: {
8904     MCInst TmpInst;
8905     TmpInst.setOpcode(ARM::t2LDRHi8);
8906     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8907     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8908     TmpInst.addOperand(Inst.getOperand(2)); // imm
8909     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8910     Inst = TmpInst;
8911     return true;
8912   }
8913   case ARM::t2LDRH_PRE_imm:
8914   case ARM::t2LDRH_POST_imm: {
8915     MCInst TmpInst;
8916     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
8917                           ? ARM::t2LDRH_PRE
8918                           : ARM::t2LDRH_POST);
8919     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8920     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8921     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8922     TmpInst.addOperand(Inst.getOperand(2)); // imm
8923     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8924     Inst = TmpInst;
8925     return true;
8926   }
8927   // Aliases for imm syntax of STRH instructions.
8928   case ARM::t2STRH_OFFSET_imm: {
8929     MCInst TmpInst;
8930     TmpInst.setOpcode(ARM::t2STRHi8);
8931     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8932     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8933     TmpInst.addOperand(Inst.getOperand(2)); // imm
8934     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8935     Inst = TmpInst;
8936     return true;
8937   }
8938   case ARM::t2STRH_PRE_imm:
8939   case ARM::t2STRH_POST_imm: {
8940     MCInst TmpInst;
8941     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
8942                           ? ARM::t2STRH_PRE
8943                           : ARM::t2STRH_POST);
8944     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8945     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8946     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8947     TmpInst.addOperand(Inst.getOperand(2)); // imm
8948     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8949     Inst = TmpInst;
8950     return true;
8951   }
8952   // Aliases for imm syntax of LDRSB instructions.
8953   case ARM::t2LDRSB_OFFSET_imm: {
8954     MCInst TmpInst;
8955     TmpInst.setOpcode(ARM::t2LDRSBi8);
8956     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8957     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8958     TmpInst.addOperand(Inst.getOperand(2)); // imm
8959     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8960     Inst = TmpInst;
8961     return true;
8962   }
8963   case ARM::t2LDRSB_PRE_imm:
8964   case ARM::t2LDRSB_POST_imm: {
8965     MCInst TmpInst;
8966     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
8967                           ? ARM::t2LDRSB_PRE
8968                           : ARM::t2LDRSB_POST);
8969     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8970     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8971     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8972     TmpInst.addOperand(Inst.getOperand(2)); // imm
8973     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8974     Inst = TmpInst;
8975     return true;
8976   }
8977   // Aliases for imm syntax of LDRSH instructions.
8978   case ARM::t2LDRSH_OFFSET_imm: {
8979     MCInst TmpInst;
8980     TmpInst.setOpcode(ARM::t2LDRSHi8);
8981     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8982     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8983     TmpInst.addOperand(Inst.getOperand(2)); // imm
8984     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8985     Inst = TmpInst;
8986     return true;
8987   }
8988   case ARM::t2LDRSH_PRE_imm:
8989   case ARM::t2LDRSH_POST_imm: {
8990     MCInst TmpInst;
8991     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
8992                           ? ARM::t2LDRSH_PRE
8993                           : ARM::t2LDRSH_POST);
8994     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8995     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8996     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8997     TmpInst.addOperand(Inst.getOperand(2)); // imm
8998     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8999     Inst = TmpInst;
9000     return true;
9001   }
9002   // Aliases for alternate PC+imm syntax of LDR instructions.
9003   case ARM::t2LDRpcrel:
9004     // Select the narrow version if the immediate will fit.
9005     if (Inst.getOperand(1).getImm() > 0 &&
9006         Inst.getOperand(1).getImm() <= 0xff &&
9007         !HasWideQualifier)
9008       Inst.setOpcode(ARM::tLDRpci);
9009     else
9010       Inst.setOpcode(ARM::t2LDRpci);
9011     return true;
9012   case ARM::t2LDRBpcrel:
9013     Inst.setOpcode(ARM::t2LDRBpci);
9014     return true;
9015   case ARM::t2LDRHpcrel:
9016     Inst.setOpcode(ARM::t2LDRHpci);
9017     return true;
9018   case ARM::t2LDRSBpcrel:
9019     Inst.setOpcode(ARM::t2LDRSBpci);
9020     return true;
9021   case ARM::t2LDRSHpcrel:
9022     Inst.setOpcode(ARM::t2LDRSHpci);
9023     return true;
9024   case ARM::LDRConstPool:
9025   case ARM::tLDRConstPool:
9026   case ARM::t2LDRConstPool: {
    // The pseudo-instruction "ldr rt, =immediate" is converted to a
    // "mov rt, immediate" if the immediate is known and representable;
    // otherwise we create a constant pool entry that we load from.
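    // E.g. "ldr r0, =1" can become "mov r0, #1" (or an MVN/MOVW variant,
    // depending on the target features), while "ldr r0, =some_symbol" or an
    // unencodable constant falls back to a PC-relative load from the
    // literal pool created via addConstantPoolEntry() below.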
9030     MCInst TmpInst;
9031     if (Inst.getOpcode() == ARM::LDRConstPool)
9032       TmpInst.setOpcode(ARM::LDRi12);
9033     else if (Inst.getOpcode() == ARM::tLDRConstPool)
9034       TmpInst.setOpcode(ARM::tLDRpci);
9035     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
9036       TmpInst.setOpcode(ARM::t2LDRpci);
9037     const ARMOperand &PoolOperand =
9038       (HasWideQualifier ?
9039        static_cast<ARMOperand &>(*Operands[4]) :
9040        static_cast<ARMOperand &>(*Operands[3]));
9041     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
    // If SubExprVal is a constant, we may be able to use a MOV.
9043     if (isa<MCConstantExpr>(SubExprVal) &&
9044         Inst.getOperand(0).getReg() != ARM::PC &&
9045         Inst.getOperand(0).getReg() != ARM::SP) {
      int64_t Value =
          (int64_t)cast<MCConstantExpr>(SubExprVal)->getValue();
      bool UseMov = true;
      bool MovHasS = true;
      if (Inst.getOpcode() == ARM::LDRConstPool) {
        // ARM Constant
        if (ARM_AM::getSOImmVal(Value) != -1) {
          Value = ARM_AM::getSOImmVal(Value);
          TmpInst.setOpcode(ARM::MOVi);
        } else if (ARM_AM::getSOImmVal(~Value) != -1) {
          Value = ARM_AM::getSOImmVal(~Value);
          TmpInst.setOpcode(ARM::MVNi);
        } else if (hasV6T2Ops() && Value >= 0 && Value < 65536) {
          TmpInst.setOpcode(ARM::MOVi16);
          MovHasS = false;
        } else {
          UseMov = false;
        }
      } else {
        // Thumb/Thumb2 Constant
        if (hasThumb2() && ARM_AM::getT2SOImmVal(Value) != -1) {
          TmpInst.setOpcode(ARM::t2MOVi);
        } else if (hasThumb2() && ARM_AM::getT2SOImmVal(~Value) != -1) {
          TmpInst.setOpcode(ARM::t2MVNi);
          Value = ~Value;
        } else if (hasV8MBaseline() && Value >= 0 && Value < 65536) {
          TmpInst.setOpcode(ARM::t2MOVi16);
          MovHasS = false;
        } else {
          UseMov = false;
        }
      }
9086       if (UseMov) {
9087         TmpInst.addOperand(Inst.getOperand(0));           // Rt
9088         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
9089         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
9090         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
9091         if (MovHasS)
9092           TmpInst.addOperand(MCOperand::createReg(0));    // S
9093         Inst = TmpInst;
9094         return true;
9095       }
9096     }
    // No opportunity to use MOV/MVN; create a constant pool entry.
9098     const MCExpr *CPLoc =
9099       getTargetStreamer().addConstantPoolEntry(SubExprVal,
9100                                                PoolOperand.getStartLoc());
9101     TmpInst.addOperand(Inst.getOperand(0));           // Rt
9102     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
9103     if (TmpInst.getOpcode() == ARM::LDRi12)
9104       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
9105     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
9106     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
9107     Inst = TmpInst;
9108     return true;
9109   }
9110   // Handle NEON VST complex aliases.
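  // For the lane and multi-structure aliases below, getRealVSTOpcode()
  // returns the real instruction opcode together with the register Spacing
  // of the list (1 for single-spaced d-register lists, 2 for the
  // every-other-register q forms), and the remaining list registers are
  // synthesized as Vd + Spacing, Vd + 2 * Spacing, ..., since only the
  // first register of the list is carried by the pseudo. E.g. a sketch for
  // "vst2.8 {d0[1], d1[1]}, [r0]": Vd is d0, Spacing is 1, and d1 is
  // reconstructed from d0 + Spacing.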
9111   case ARM::VST1LNdWB_register_Asm_8:
9112   case ARM::VST1LNdWB_register_Asm_16:
9113   case ARM::VST1LNdWB_register_Asm_32: {
9114     MCInst TmpInst;
9115     // Shuffle the operands around so the lane index operand is in the
9116     // right place.
9117     unsigned Spacing;
9118     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9119     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9120     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9121     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9122     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9123     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9124     TmpInst.addOperand(Inst.getOperand(1)); // lane
9125     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9126     TmpInst.addOperand(Inst.getOperand(6));
9127     Inst = TmpInst;
9128     return true;
9129   }
9130 
9131   case ARM::VST2LNdWB_register_Asm_8:
9132   case ARM::VST2LNdWB_register_Asm_16:
9133   case ARM::VST2LNdWB_register_Asm_32:
9134   case ARM::VST2LNqWB_register_Asm_16:
9135   case ARM::VST2LNqWB_register_Asm_32: {
9136     MCInst TmpInst;
9137     // Shuffle the operands around so the lane index operand is in the
9138     // right place.
9139     unsigned Spacing;
9140     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9141     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9142     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9143     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9144     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9145     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9146     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9147                                             Spacing));
9148     TmpInst.addOperand(Inst.getOperand(1)); // lane
9149     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9150     TmpInst.addOperand(Inst.getOperand(6));
9151     Inst = TmpInst;
9152     return true;
9153   }
9154 
9155   case ARM::VST3LNdWB_register_Asm_8:
9156   case ARM::VST3LNdWB_register_Asm_16:
9157   case ARM::VST3LNdWB_register_Asm_32:
9158   case ARM::VST3LNqWB_register_Asm_16:
9159   case ARM::VST3LNqWB_register_Asm_32: {
9160     MCInst TmpInst;
9161     // Shuffle the operands around so the lane index operand is in the
9162     // right place.
9163     unsigned Spacing;
9164     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9165     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9166     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9167     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9168     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9169     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9170     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9171                                             Spacing));
9172     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9173                                             Spacing * 2));
9174     TmpInst.addOperand(Inst.getOperand(1)); // lane
9175     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9176     TmpInst.addOperand(Inst.getOperand(6));
9177     Inst = TmpInst;
9178     return true;
9179   }
9180 
9181   case ARM::VST4LNdWB_register_Asm_8:
9182   case ARM::VST4LNdWB_register_Asm_16:
9183   case ARM::VST4LNdWB_register_Asm_32:
9184   case ARM::VST4LNqWB_register_Asm_16:
9185   case ARM::VST4LNqWB_register_Asm_32: {
9186     MCInst TmpInst;
9187     // Shuffle the operands around so the lane index operand is in the
9188     // right place.
9189     unsigned Spacing;
9190     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9191     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9192     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9193     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9194     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9195     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9196     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9197                                             Spacing));
9198     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9199                                             Spacing * 2));
9200     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9201                                             Spacing * 3));
9202     TmpInst.addOperand(Inst.getOperand(1)); // lane
9203     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9204     TmpInst.addOperand(Inst.getOperand(6));
9205     Inst = TmpInst;
9206     return true;
9207   }
9208 
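  // The "_fixed" writeback aliases below correspond to the post-increment
  // syntax without a register offset (e.g. "vst1.8 {d0[1]}, [r0]!"), so the
  // Rm slot of the real instruction is filled with register 0 rather than a
  // parsed operand; the "_register" forms above take Rm from the operand
  // list instead.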
9209   case ARM::VST1LNdWB_fixed_Asm_8:
9210   case ARM::VST1LNdWB_fixed_Asm_16:
9211   case ARM::VST1LNdWB_fixed_Asm_32: {
9212     MCInst TmpInst;
9213     // Shuffle the operands around so the lane index operand is in the
9214     // right place.
9215     unsigned Spacing;
9216     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9217     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9218     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9219     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9220     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9221     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9222     TmpInst.addOperand(Inst.getOperand(1)); // lane
9223     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9224     TmpInst.addOperand(Inst.getOperand(5));
9225     Inst = TmpInst;
9226     return true;
9227   }
9228 
9229   case ARM::VST2LNdWB_fixed_Asm_8:
9230   case ARM::VST2LNdWB_fixed_Asm_16:
9231   case ARM::VST2LNdWB_fixed_Asm_32:
9232   case ARM::VST2LNqWB_fixed_Asm_16:
9233   case ARM::VST2LNqWB_fixed_Asm_32: {
9234     MCInst TmpInst;
9235     // Shuffle the operands around so the lane index operand is in the
9236     // right place.
9237     unsigned Spacing;
9238     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9239     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9240     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9241     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9242     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9243     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9244     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9245                                             Spacing));
9246     TmpInst.addOperand(Inst.getOperand(1)); // lane
9247     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9248     TmpInst.addOperand(Inst.getOperand(5));
9249     Inst = TmpInst;
9250     return true;
9251   }
9252 
9253   case ARM::VST3LNdWB_fixed_Asm_8:
9254   case ARM::VST3LNdWB_fixed_Asm_16:
9255   case ARM::VST3LNdWB_fixed_Asm_32:
9256   case ARM::VST3LNqWB_fixed_Asm_16:
9257   case ARM::VST3LNqWB_fixed_Asm_32: {
9258     MCInst TmpInst;
9259     // Shuffle the operands around so the lane index operand is in the
9260     // right place.
9261     unsigned Spacing;
9262     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9263     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9264     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9265     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9266     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9267     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9268     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9269                                             Spacing));
9270     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9271                                             Spacing * 2));
9272     TmpInst.addOperand(Inst.getOperand(1)); // lane
9273     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9274     TmpInst.addOperand(Inst.getOperand(5));
9275     Inst = TmpInst;
9276     return true;
9277   }
9278 
9279   case ARM::VST4LNdWB_fixed_Asm_8:
9280   case ARM::VST4LNdWB_fixed_Asm_16:
9281   case ARM::VST4LNdWB_fixed_Asm_32:
9282   case ARM::VST4LNqWB_fixed_Asm_16:
9283   case ARM::VST4LNqWB_fixed_Asm_32: {
9284     MCInst TmpInst;
9285     // Shuffle the operands around so the lane index operand is in the
9286     // right place.
9287     unsigned Spacing;
9288     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9289     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9290     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9291     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9292     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9293     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9294     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9295                                             Spacing));
9296     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9297                                             Spacing * 2));
9298     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9299                                             Spacing * 3));
9300     TmpInst.addOperand(Inst.getOperand(1)); // lane
9301     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9302     TmpInst.addOperand(Inst.getOperand(5));
9303     Inst = TmpInst;
9304     return true;
9305   }
9306 
9307   case ARM::VST1LNdAsm_8:
9308   case ARM::VST1LNdAsm_16:
9309   case ARM::VST1LNdAsm_32: {
9310     MCInst TmpInst;
9311     // Shuffle the operands around so the lane index operand is in the
9312     // right place.
9313     unsigned Spacing;
9314     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9315     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9316     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9317     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9318     TmpInst.addOperand(Inst.getOperand(1)); // lane
9319     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9320     TmpInst.addOperand(Inst.getOperand(5));
9321     Inst = TmpInst;
9322     return true;
9323   }
9324 
9325   case ARM::VST2LNdAsm_8:
9326   case ARM::VST2LNdAsm_16:
9327   case ARM::VST2LNdAsm_32:
9328   case ARM::VST2LNqAsm_16:
9329   case ARM::VST2LNqAsm_32: {
9330     MCInst TmpInst;
9331     // Shuffle the operands around so the lane index operand is in the
9332     // right place.
9333     unsigned Spacing;
9334     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9335     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9336     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9337     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9338     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9339                                             Spacing));
9340     TmpInst.addOperand(Inst.getOperand(1)); // lane
9341     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9342     TmpInst.addOperand(Inst.getOperand(5));
9343     Inst = TmpInst;
9344     return true;
9345   }
9346 
9347   case ARM::VST3LNdAsm_8:
9348   case ARM::VST3LNdAsm_16:
9349   case ARM::VST3LNdAsm_32:
9350   case ARM::VST3LNqAsm_16:
9351   case ARM::VST3LNqAsm_32: {
9352     MCInst TmpInst;
9353     // Shuffle the operands around so the lane index operand is in the
9354     // right place.
9355     unsigned Spacing;
9356     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9357     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9358     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9359     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9360     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9361                                             Spacing));
9362     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9363                                             Spacing * 2));
9364     TmpInst.addOperand(Inst.getOperand(1)); // lane
9365     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9366     TmpInst.addOperand(Inst.getOperand(5));
9367     Inst = TmpInst;
9368     return true;
9369   }
9370 
9371   case ARM::VST4LNdAsm_8:
9372   case ARM::VST4LNdAsm_16:
9373   case ARM::VST4LNdAsm_32:
9374   case ARM::VST4LNqAsm_16:
9375   case ARM::VST4LNqAsm_32: {
9376     MCInst TmpInst;
9377     // Shuffle the operands around so the lane index operand is in the
9378     // right place.
9379     unsigned Spacing;
9380     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9381     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9382     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9383     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9384     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9385                                             Spacing));
9386     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9387                                             Spacing * 2));
9388     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9389                                             Spacing * 3));
9390     TmpInst.addOperand(Inst.getOperand(1)); // lane
9391     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9392     TmpInst.addOperand(Inst.getOperand(5));
9393     Inst = TmpInst;
9394     return true;
9395   }
9396 
9397   // Handle NEON VLD complex aliases.
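  // Same idea as the VST aliases above, but loads also list the destination
  // registers as defs and then repeat them as the tied source operands (the
  // "Tied operand src (== Vd)" entries), since a lane load only replaces one
  // element of each destination register.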
9398   case ARM::VLD1LNdWB_register_Asm_8:
9399   case ARM::VLD1LNdWB_register_Asm_16:
9400   case ARM::VLD1LNdWB_register_Asm_32: {
9401     MCInst TmpInst;
9402     // Shuffle the operands around so the lane index operand is in the
9403     // right place.
9404     unsigned Spacing;
9405     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9406     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9407     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9408     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9409     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9410     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9411     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9412     TmpInst.addOperand(Inst.getOperand(1)); // lane
9413     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9414     TmpInst.addOperand(Inst.getOperand(6));
9415     Inst = TmpInst;
9416     return true;
9417   }
9418 
9419   case ARM::VLD2LNdWB_register_Asm_8:
9420   case ARM::VLD2LNdWB_register_Asm_16:
9421   case ARM::VLD2LNdWB_register_Asm_32:
9422   case ARM::VLD2LNqWB_register_Asm_16:
9423   case ARM::VLD2LNqWB_register_Asm_32: {
9424     MCInst TmpInst;
9425     // Shuffle the operands around so the lane index operand is in the
9426     // right place.
9427     unsigned Spacing;
9428     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9429     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9430     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9431                                             Spacing));
9432     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9433     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9434     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9435     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9436     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9437     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9438                                             Spacing));
9439     TmpInst.addOperand(Inst.getOperand(1)); // lane
9440     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9441     TmpInst.addOperand(Inst.getOperand(6));
9442     Inst = TmpInst;
9443     return true;
9444   }
9445 
9446   case ARM::VLD3LNdWB_register_Asm_8:
9447   case ARM::VLD3LNdWB_register_Asm_16:
9448   case ARM::VLD3LNdWB_register_Asm_32:
9449   case ARM::VLD3LNqWB_register_Asm_16:
9450   case ARM::VLD3LNqWB_register_Asm_32: {
9451     MCInst TmpInst;
9452     // Shuffle the operands around so the lane index operand is in the
9453     // right place.
9454     unsigned Spacing;
9455     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9456     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9457     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9458                                             Spacing));
9459     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9460                                             Spacing * 2));
9461     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9462     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9463     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9464     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9465     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9466     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9467                                             Spacing));
9468     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9469                                             Spacing * 2));
9470     TmpInst.addOperand(Inst.getOperand(1)); // lane
9471     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9472     TmpInst.addOperand(Inst.getOperand(6));
9473     Inst = TmpInst;
9474     return true;
9475   }
9476 
9477   case ARM::VLD4LNdWB_register_Asm_8:
9478   case ARM::VLD4LNdWB_register_Asm_16:
9479   case ARM::VLD4LNdWB_register_Asm_32:
9480   case ARM::VLD4LNqWB_register_Asm_16:
9481   case ARM::VLD4LNqWB_register_Asm_32: {
9482     MCInst TmpInst;
9483     // Shuffle the operands around so the lane index operand is in the
9484     // right place.
9485     unsigned Spacing;
9486     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9487     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9488     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9489                                             Spacing));
9490     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9491                                             Spacing * 2));
9492     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9493                                             Spacing * 3));
9494     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9495     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9496     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9497     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9498     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9499     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9500                                             Spacing));
9501     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9502                                             Spacing * 2));
9503     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9504                                             Spacing * 3));
9505     TmpInst.addOperand(Inst.getOperand(1)); // lane
9506     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9507     TmpInst.addOperand(Inst.getOperand(6));
9508     Inst = TmpInst;
9509     return true;
9510   }
9511 
9512   case ARM::VLD1LNdWB_fixed_Asm_8:
9513   case ARM::VLD1LNdWB_fixed_Asm_16:
9514   case ARM::VLD1LNdWB_fixed_Asm_32: {
9515     MCInst TmpInst;
9516     // Shuffle the operands around so the lane index operand is in the
9517     // right place.
9518     unsigned Spacing;
9519     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9520     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9521     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9522     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9523     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9524     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9525     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9526     TmpInst.addOperand(Inst.getOperand(1)); // lane
9527     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9528     TmpInst.addOperand(Inst.getOperand(5));
9529     Inst = TmpInst;
9530     return true;
9531   }
9532 
9533   case ARM::VLD2LNdWB_fixed_Asm_8:
9534   case ARM::VLD2LNdWB_fixed_Asm_16:
9535   case ARM::VLD2LNdWB_fixed_Asm_32:
9536   case ARM::VLD2LNqWB_fixed_Asm_16:
9537   case ARM::VLD2LNqWB_fixed_Asm_32: {
9538     MCInst TmpInst;
9539     // Shuffle the operands around so the lane index operand is in the
9540     // right place.
9541     unsigned Spacing;
9542     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9543     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9544     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9545                                             Spacing));
9546     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9547     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9548     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9549     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9550     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9551     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9552                                             Spacing));
9553     TmpInst.addOperand(Inst.getOperand(1)); // lane
9554     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9555     TmpInst.addOperand(Inst.getOperand(5));
9556     Inst = TmpInst;
9557     return true;
9558   }
9559 
9560   case ARM::VLD3LNdWB_fixed_Asm_8:
9561   case ARM::VLD3LNdWB_fixed_Asm_16:
9562   case ARM::VLD3LNdWB_fixed_Asm_32:
9563   case ARM::VLD3LNqWB_fixed_Asm_16:
9564   case ARM::VLD3LNqWB_fixed_Asm_32: {
9565     MCInst TmpInst;
9566     // Shuffle the operands around so the lane index operand is in the
9567     // right place.
9568     unsigned Spacing;
9569     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9570     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9571     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9572                                             Spacing));
9573     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9574                                             Spacing * 2));
9575     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9576     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9577     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9578     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9579     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9580     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9581                                             Spacing));
9582     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9583                                             Spacing * 2));
9584     TmpInst.addOperand(Inst.getOperand(1)); // lane
9585     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9586     TmpInst.addOperand(Inst.getOperand(5));
9587     Inst = TmpInst;
9588     return true;
9589   }
9590 
9591   case ARM::VLD4LNdWB_fixed_Asm_8:
9592   case ARM::VLD4LNdWB_fixed_Asm_16:
9593   case ARM::VLD4LNdWB_fixed_Asm_32:
9594   case ARM::VLD4LNqWB_fixed_Asm_16:
9595   case ARM::VLD4LNqWB_fixed_Asm_32: {
9596     MCInst TmpInst;
9597     // Shuffle the operands around so the lane index operand is in the
9598     // right place.
9599     unsigned Spacing;
9600     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9601     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9602     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9603                                             Spacing));
9604     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9605                                             Spacing * 2));
9606     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9607                                             Spacing * 3));
9608     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9609     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9610     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9611     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9612     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9613     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9614                                             Spacing));
9615     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9616                                             Spacing * 2));
9617     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9618                                             Spacing * 3));
9619     TmpInst.addOperand(Inst.getOperand(1)); // lane
9620     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9621     TmpInst.addOperand(Inst.getOperand(5));
9622     Inst = TmpInst;
9623     return true;
9624   }
9625 
9626   case ARM::VLD1LNdAsm_8:
9627   case ARM::VLD1LNdAsm_16:
9628   case ARM::VLD1LNdAsm_32: {
9629     MCInst TmpInst;
9630     // Shuffle the operands around so the lane index operand is in the
9631     // right place.
9632     unsigned Spacing;
9633     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9634     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9635     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9636     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9637     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9638     TmpInst.addOperand(Inst.getOperand(1)); // lane
9639     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9640     TmpInst.addOperand(Inst.getOperand(5));
9641     Inst = TmpInst;
9642     return true;
9643   }
9644 
9645   case ARM::VLD2LNdAsm_8:
9646   case ARM::VLD2LNdAsm_16:
9647   case ARM::VLD2LNdAsm_32:
9648   case ARM::VLD2LNqAsm_16:
9649   case ARM::VLD2LNqAsm_32: {
9650     MCInst TmpInst;
9651     // Shuffle the operands around so the lane index operand is in the
9652     // right place.
9653     unsigned Spacing;
9654     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9655     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9656     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9657                                             Spacing));
9658     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9659     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9660     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9661     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9662                                             Spacing));
9663     TmpInst.addOperand(Inst.getOperand(1)); // lane
9664     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9665     TmpInst.addOperand(Inst.getOperand(5));
9666     Inst = TmpInst;
9667     return true;
9668   }
9669 
9670   case ARM::VLD3LNdAsm_8:
9671   case ARM::VLD3LNdAsm_16:
9672   case ARM::VLD3LNdAsm_32:
9673   case ARM::VLD3LNqAsm_16:
9674   case ARM::VLD3LNqAsm_32: {
9675     MCInst TmpInst;
9676     // Shuffle the operands around so the lane index operand is in the
9677     // right place.
9678     unsigned Spacing;
9679     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9680     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9681     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9682                                             Spacing));
9683     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9684                                             Spacing * 2));
9685     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9686     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9687     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9688     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9689                                             Spacing));
9690     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9691                                             Spacing * 2));
9692     TmpInst.addOperand(Inst.getOperand(1)); // lane
9693     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9694     TmpInst.addOperand(Inst.getOperand(5));
9695     Inst = TmpInst;
9696     return true;
9697   }
9698 
9699   case ARM::VLD4LNdAsm_8:
9700   case ARM::VLD4LNdAsm_16:
9701   case ARM::VLD4LNdAsm_32:
9702   case ARM::VLD4LNqAsm_16:
9703   case ARM::VLD4LNqAsm_32: {
9704     MCInst TmpInst;
9705     // Shuffle the operands around so the lane index operand is in the
9706     // right place.
9707     unsigned Spacing;
9708     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9709     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9710     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9711                                             Spacing));
9712     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9713                                             Spacing * 2));
9714     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9715                                             Spacing * 3));
9716     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9717     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9718     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9719     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9720                                             Spacing));
9721     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9722                                             Spacing * 2));
9723     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9724                                             Spacing * 3));
9725     TmpInst.addOperand(Inst.getOperand(1)); // lane
9726     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9727     TmpInst.addOperand(Inst.getOperand(5));
9728     Inst = TmpInst;
9729     return true;
9730   }
9731 
9732   // VLD3DUP single 3-element structure to all lanes instructions.
9733   case ARM::VLD3DUPdAsm_8:
9734   case ARM::VLD3DUPdAsm_16:
9735   case ARM::VLD3DUPdAsm_32:
9736   case ARM::VLD3DUPqAsm_8:
9737   case ARM::VLD3DUPqAsm_16:
9738   case ARM::VLD3DUPqAsm_32: {
9739     MCInst TmpInst;
9740     unsigned Spacing;
9741     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9742     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9743     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9744                                             Spacing));
9745     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9746                                             Spacing * 2));
9747     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9748     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9749     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9750     TmpInst.addOperand(Inst.getOperand(4));
9751     Inst = TmpInst;
9752     return true;
9753   }
9754 
9755   case ARM::VLD3DUPdWB_fixed_Asm_8:
9756   case ARM::VLD3DUPdWB_fixed_Asm_16:
9757   case ARM::VLD3DUPdWB_fixed_Asm_32:
9758   case ARM::VLD3DUPqWB_fixed_Asm_8:
9759   case ARM::VLD3DUPqWB_fixed_Asm_16:
9760   case ARM::VLD3DUPqWB_fixed_Asm_32: {
9761     MCInst TmpInst;
9762     unsigned Spacing;
9763     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9764     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9765     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9766                                             Spacing));
9767     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9768                                             Spacing * 2));
9769     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9770     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9771     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9772     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9773     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9774     TmpInst.addOperand(Inst.getOperand(4));
9775     Inst = TmpInst;
9776     return true;
9777   }
9778 
9779   case ARM::VLD3DUPdWB_register_Asm_8:
9780   case ARM::VLD3DUPdWB_register_Asm_16:
9781   case ARM::VLD3DUPdWB_register_Asm_32:
9782   case ARM::VLD3DUPqWB_register_Asm_8:
9783   case ARM::VLD3DUPqWB_register_Asm_16:
9784   case ARM::VLD3DUPqWB_register_Asm_32: {
9785     MCInst TmpInst;
9786     unsigned Spacing;
9787     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9788     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9789     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9790                                             Spacing));
9791     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9792                                             Spacing * 2));
9793     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9794     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9795     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9796     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9797     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9798     TmpInst.addOperand(Inst.getOperand(5));
9799     Inst = TmpInst;
9800     return true;
9801   }
9802 
9803   // VLD3 multiple 3-element structure instructions.
9804   case ARM::VLD3dAsm_8:
9805   case ARM::VLD3dAsm_16:
9806   case ARM::VLD3dAsm_32:
9807   case ARM::VLD3qAsm_8:
9808   case ARM::VLD3qAsm_16:
9809   case ARM::VLD3qAsm_32: {
9810     MCInst TmpInst;
9811     unsigned Spacing;
9812     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9813     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9814     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9815                                             Spacing));
9816     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9817                                             Spacing * 2));
9818     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9819     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9820     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9821     TmpInst.addOperand(Inst.getOperand(4));
9822     Inst = TmpInst;
9823     return true;
9824   }
9825 
9826   case ARM::VLD3dWB_fixed_Asm_8:
9827   case ARM::VLD3dWB_fixed_Asm_16:
9828   case ARM::VLD3dWB_fixed_Asm_32:
9829   case ARM::VLD3qWB_fixed_Asm_8:
9830   case ARM::VLD3qWB_fixed_Asm_16:
9831   case ARM::VLD3qWB_fixed_Asm_32: {
9832     MCInst TmpInst;
9833     unsigned Spacing;
9834     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9835     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9836     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9837                                             Spacing));
9838     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9839                                             Spacing * 2));
9840     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9841     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9842     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9843     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9844     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9845     TmpInst.addOperand(Inst.getOperand(4));
9846     Inst = TmpInst;
9847     return true;
9848   }
9849 
9850   case ARM::VLD3dWB_register_Asm_8:
9851   case ARM::VLD3dWB_register_Asm_16:
9852   case ARM::VLD3dWB_register_Asm_32:
9853   case ARM::VLD3qWB_register_Asm_8:
9854   case ARM::VLD3qWB_register_Asm_16:
9855   case ARM::VLD3qWB_register_Asm_32: {
9856     MCInst TmpInst;
9857     unsigned Spacing;
9858     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9859     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9860     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9861                                             Spacing));
9862     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9863                                             Spacing * 2));
9864     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9865     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9866     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9867     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9868     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9869     TmpInst.addOperand(Inst.getOperand(5));
9870     Inst = TmpInst;
9871     return true;
9872   }
9873 
  // VLD4DUP single 4-element structure to all lanes instructions.
9875   case ARM::VLD4DUPdAsm_8:
9876   case ARM::VLD4DUPdAsm_16:
9877   case ARM::VLD4DUPdAsm_32:
9878   case ARM::VLD4DUPqAsm_8:
9879   case ARM::VLD4DUPqAsm_16:
9880   case ARM::VLD4DUPqAsm_32: {
9881     MCInst TmpInst;
9882     unsigned Spacing;
9883     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9884     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9885     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9886                                             Spacing));
9887     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9888                                             Spacing * 2));
9889     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9890                                             Spacing * 3));
9891     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9892     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9893     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9894     TmpInst.addOperand(Inst.getOperand(4));
9895     Inst = TmpInst;
9896     return true;
9897   }
9898 
9899   case ARM::VLD4DUPdWB_fixed_Asm_8:
9900   case ARM::VLD4DUPdWB_fixed_Asm_16:
9901   case ARM::VLD4DUPdWB_fixed_Asm_32:
9902   case ARM::VLD4DUPqWB_fixed_Asm_8:
9903   case ARM::VLD4DUPqWB_fixed_Asm_16:
9904   case ARM::VLD4DUPqWB_fixed_Asm_32: {
9905     MCInst TmpInst;
9906     unsigned Spacing;
9907     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9908     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9909     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9910                                             Spacing));
9911     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9912                                             Spacing * 2));
9913     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9914                                             Spacing * 3));
9915     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9916     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9917     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9918     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9919     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9920     TmpInst.addOperand(Inst.getOperand(4));
9921     Inst = TmpInst;
9922     return true;
9923   }
9924 
9925   case ARM::VLD4DUPdWB_register_Asm_8:
9926   case ARM::VLD4DUPdWB_register_Asm_16:
9927   case ARM::VLD4DUPdWB_register_Asm_32:
9928   case ARM::VLD4DUPqWB_register_Asm_8:
9929   case ARM::VLD4DUPqWB_register_Asm_16:
9930   case ARM::VLD4DUPqWB_register_Asm_32: {
9931     MCInst TmpInst;
9932     unsigned Spacing;
9933     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9934     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9935     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9936                                             Spacing));
9937     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9938                                             Spacing * 2));
9939     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9940                                             Spacing * 3));
9941     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9942     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9943     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9944     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9945     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9946     TmpInst.addOperand(Inst.getOperand(5));
9947     Inst = TmpInst;
9948     return true;
9949   }
9950 
9951   // VLD4 multiple 4-element structure instructions.
9952   case ARM::VLD4dAsm_8:
9953   case ARM::VLD4dAsm_16:
9954   case ARM::VLD4dAsm_32:
9955   case ARM::VLD4qAsm_8:
9956   case ARM::VLD4qAsm_16:
9957   case ARM::VLD4qAsm_32: {
9958     MCInst TmpInst;
9959     unsigned Spacing;
9960     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9961     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9962     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9963                                             Spacing));
9964     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9965                                             Spacing * 2));
9966     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9967                                             Spacing * 3));
9968     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9969     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9970     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9971     TmpInst.addOperand(Inst.getOperand(4));
9972     Inst = TmpInst;
9973     return true;
9974   }
9975 
9976   case ARM::VLD4dWB_fixed_Asm_8:
9977   case ARM::VLD4dWB_fixed_Asm_16:
9978   case ARM::VLD4dWB_fixed_Asm_32:
9979   case ARM::VLD4qWB_fixed_Asm_8:
9980   case ARM::VLD4qWB_fixed_Asm_16:
9981   case ARM::VLD4qWB_fixed_Asm_32: {
9982     MCInst TmpInst;
9983     unsigned Spacing;
9984     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9985     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9986     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9987                                             Spacing));
9988     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9989                                             Spacing * 2));
9990     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9991                                             Spacing * 3));
9992     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9993     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9994     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9995     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9996     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9997     TmpInst.addOperand(Inst.getOperand(4));
9998     Inst = TmpInst;
9999     return true;
10000   }
10001 
10002   case ARM::VLD4dWB_register_Asm_8:
10003   case ARM::VLD4dWB_register_Asm_16:
10004   case ARM::VLD4dWB_register_Asm_32:
10005   case ARM::VLD4qWB_register_Asm_8:
10006   case ARM::VLD4qWB_register_Asm_16:
10007   case ARM::VLD4qWB_register_Asm_32: {
10008     MCInst TmpInst;
10009     unsigned Spacing;
10010     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10011     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10012     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10013                                             Spacing));
10014     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10015                                             Spacing * 2));
10016     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10017                                             Spacing * 3));
10018     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10019     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10020     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10021     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10022     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10023     TmpInst.addOperand(Inst.getOperand(5));
10024     Inst = TmpInst;
10025     return true;
10026   }
10027 
10028   // VST3 multiple 3-element structure instructions.
10029   case ARM::VST3dAsm_8:
10030   case ARM::VST3dAsm_16:
10031   case ARM::VST3dAsm_32:
10032   case ARM::VST3qAsm_8:
10033   case ARM::VST3qAsm_16:
10034   case ARM::VST3qAsm_32: {
10035     MCInst TmpInst;
10036     unsigned Spacing;
10037     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10038     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10039     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10040     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10041     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10042                                             Spacing));
10043     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10044                                             Spacing * 2));
10045     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10046     TmpInst.addOperand(Inst.getOperand(4));
10047     Inst = TmpInst;
10048     return true;
10049   }
10050 
10051   case ARM::VST3dWB_fixed_Asm_8:
10052   case ARM::VST3dWB_fixed_Asm_16:
10053   case ARM::VST3dWB_fixed_Asm_32:
10054   case ARM::VST3qWB_fixed_Asm_8:
10055   case ARM::VST3qWB_fixed_Asm_16:
10056   case ARM::VST3qWB_fixed_Asm_32: {
10057     MCInst TmpInst;
10058     unsigned Spacing;
10059     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10060     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10061     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10062     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10063     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10064     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10065     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10066                                             Spacing));
10067     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10068                                             Spacing * 2));
10069     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10070     TmpInst.addOperand(Inst.getOperand(4));
10071     Inst = TmpInst;
10072     return true;
10073   }
10074 
10075   case ARM::VST3dWB_register_Asm_8:
10076   case ARM::VST3dWB_register_Asm_16:
10077   case ARM::VST3dWB_register_Asm_32:
10078   case ARM::VST3qWB_register_Asm_8:
10079   case ARM::VST3qWB_register_Asm_16:
10080   case ARM::VST3qWB_register_Asm_32: {
10081     MCInst TmpInst;
10082     unsigned Spacing;
10083     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10084     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10085     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10086     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10087     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10088     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10089     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10090                                             Spacing));
10091     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10092                                             Spacing * 2));
10093     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10094     TmpInst.addOperand(Inst.getOperand(5));
10095     Inst = TmpInst;
10096     return true;
10097   }
10098 
  // VST4 multiple 4-element structure instructions.
10100   case ARM::VST4dAsm_8:
10101   case ARM::VST4dAsm_16:
10102   case ARM::VST4dAsm_32:
10103   case ARM::VST4qAsm_8:
10104   case ARM::VST4qAsm_16:
10105   case ARM::VST4qAsm_32: {
10106     MCInst TmpInst;
10107     unsigned Spacing;
10108     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10109     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10110     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10111     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10112     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10113                                             Spacing));
10114     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10115                                             Spacing * 2));
10116     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10117                                             Spacing * 3));
10118     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10119     TmpInst.addOperand(Inst.getOperand(4));
10120     Inst = TmpInst;
10121     return true;
10122   }
10123 
10124   case ARM::VST4dWB_fixed_Asm_8:
10125   case ARM::VST4dWB_fixed_Asm_16:
10126   case ARM::VST4dWB_fixed_Asm_32:
10127   case ARM::VST4qWB_fixed_Asm_8:
10128   case ARM::VST4qWB_fixed_Asm_16:
10129   case ARM::VST4qWB_fixed_Asm_32: {
10130     MCInst TmpInst;
10131     unsigned Spacing;
10132     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10133     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10134     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10135     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10136     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10137     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10138     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10139                                             Spacing));
10140     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10141                                             Spacing * 2));
10142     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10143                                             Spacing * 3));
10144     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10145     TmpInst.addOperand(Inst.getOperand(4));
10146     Inst = TmpInst;
10147     return true;
10148   }
10149 
10150   case ARM::VST4dWB_register_Asm_8:
10151   case ARM::VST4dWB_register_Asm_16:
10152   case ARM::VST4dWB_register_Asm_32:
10153   case ARM::VST4qWB_register_Asm_8:
10154   case ARM::VST4qWB_register_Asm_16:
10155   case ARM::VST4qWB_register_Asm_32: {
10156     MCInst TmpInst;
10157     unsigned Spacing;
10158     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10159     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10160     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10161     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10162     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10163     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10164     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10165                                             Spacing));
10166     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10167                                             Spacing * 2));
10168     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10169                                             Spacing * 3));
10170     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10171     TmpInst.addOperand(Inst.getOperand(5));
10172     Inst = TmpInst;
10173     return true;
10174   }
10175 
10176   // Handle encoding choice for the shift-immediate instructions.
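  // For example (illustrative), outside an IT block "lsls r0, r1, #2" (flag
  // setting, both registers low) can use the 16-bit encoding, and inside an
  // IT block the non-flag-setting "lsl r0, r1, #2" can, unless ".w" was
  // requested.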
10177   case ARM::t2LSLri:
10178   case ARM::t2LSRri:
10179   case ARM::t2ASRri:
10180     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10181         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10182         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10183         !HasWideQualifier) {
10184       unsigned NewOpc;
10185       switch (Inst.getOpcode()) {
10186       default: llvm_unreachable("unexpected opcode");
10187       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
10188       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
10189       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
10190       }
10191       // The Thumb1 operands aren't in the same order. Awesome, eh?
10192       MCInst TmpInst;
10193       TmpInst.setOpcode(NewOpc);
10194       TmpInst.addOperand(Inst.getOperand(0));
10195       TmpInst.addOperand(Inst.getOperand(5));
10196       TmpInst.addOperand(Inst.getOperand(1));
10197       TmpInst.addOperand(Inst.getOperand(2));
10198       TmpInst.addOperand(Inst.getOperand(3));
10199       TmpInst.addOperand(Inst.getOperand(4));
10200       Inst = TmpInst;
10201       return true;
10202     }
10203     return false;
10204 
10205   // Handle the Thumb2 mode MOV complex aliases.
10206   case ARM::t2MOVsr:
10207   case ARM::t2MOVSsr: {
10208     // Which instruction to expand to depends on the CCOut operand and on
10209     // whether we're in an IT block, provided the register operands are low
10210     // registers.
10211     bool isNarrow = false;
10212     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10213         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10214         isARMLowRegister(Inst.getOperand(2).getReg()) &&
10215         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10216         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
10217         !HasWideQualifier)
10218       isNarrow = true;
10219     MCInst TmpInst;
10220     unsigned newOpc;
10221     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
10222     default: llvm_unreachable("unexpected opcode!");
10223     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
10224     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
10225     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
10226     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
10227     }
10228     TmpInst.setOpcode(newOpc);
10229     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10230     if (isNarrow)
10231       TmpInst.addOperand(MCOperand::createReg(
10232           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10233     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10234     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10235     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10236     TmpInst.addOperand(Inst.getOperand(5));
10237     if (!isNarrow)
10238       TmpInst.addOperand(MCOperand::createReg(
10239           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10240     Inst = TmpInst;
10241     return true;
10242   }
10243   case ARM::t2MOVsi:
10244   case ARM::t2MOVSsi: {
10245     // Which instruction to expand to depends on the CCOut operand and on
10246     // whether we're in an IT block, provided the register operands are low
10247     // registers.
10248     bool isNarrow = false;
10249     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10250         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10251         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10252         !HasWideQualifier)
10253       isNarrow = true;
10254     MCInst TmpInst;
10255     unsigned newOpc;
10256     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10257     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
10258     bool isMov = false;
10259     // MOV rd, rm, LSL #0 is actually a MOV instruction
10260     if (Shift == ARM_AM::lsl && Amount == 0) {
10261       isMov = true;
10262       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10263       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10264       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10265       // instead.
10266       if (inITBlock()) {
10267         isNarrow = false;
10268       }
10269       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10270     } else {
10271       switch(Shift) {
10272       default: llvm_unreachable("unexpected opcode!");
10273       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10274       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10275       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10276       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10277       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10278       }
10279     }
10280     if (Amount == 32) Amount = 0;
10281     TmpInst.setOpcode(newOpc);
10282     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10283     if (isNarrow && !isMov)
10284       TmpInst.addOperand(MCOperand::createReg(
10285           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10286     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10287     if (newOpc != ARM::t2RRX && !isMov)
10288       TmpInst.addOperand(MCOperand::createImm(Amount));
10289     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10290     TmpInst.addOperand(Inst.getOperand(4));
10291     if (!isNarrow)
10292       TmpInst.addOperand(MCOperand::createReg(
10293           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10294     Inst = TmpInst;
10295     return true;
10296   }
10297   // Handle the ARM mode MOV complex aliases.
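  // For example (illustrative), in ARM-mode UAL "asr r0, r1, r2" is an alias
  // of "mov r0, r1, asr r2" and is re-encoded below as MOVsr.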
10298   case ARM::ASRr:
10299   case ARM::LSRr:
10300   case ARM::LSLr:
10301   case ARM::RORr: {
10302     ARM_AM::ShiftOpc ShiftTy;
10303     switch(Inst.getOpcode()) {
10304     default: llvm_unreachable("unexpected opcode!");
10305     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10306     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10307     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10308     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10309     }
10310     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
10311     MCInst TmpInst;
10312     TmpInst.setOpcode(ARM::MOVsr);
10313     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10314     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10315     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10316     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10317     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10318     TmpInst.addOperand(Inst.getOperand(4));
10319     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10320     Inst = TmpInst;
10321     return true;
10322   }
10323   case ARM::ASRi:
10324   case ARM::LSRi:
10325   case ARM::LSLi:
10326   case ARM::RORi: {
10327     ARM_AM::ShiftOpc ShiftTy;
10328     switch(Inst.getOpcode()) {
10329     default: llvm_unreachable("unexpected opcode!");
10330     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10331     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10332     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10333     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10334     }
10335     // A shift by zero is a plain MOVr, not a MOVsi.
10336     unsigned Amt = Inst.getOperand(2).getImm();
10337     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10338     // A shift by 32 should be encoded as 0 when permitted
10339     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10340       Amt = 0;
10341     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
10342     MCInst TmpInst;
10343     TmpInst.setOpcode(Opc);
10344     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10345     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10346     if (Opc == ARM::MOVsi)
10347       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10348     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10349     TmpInst.addOperand(Inst.getOperand(4));
10350     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10351     Inst = TmpInst;
10352     return true;
10353   }
10354   case ARM::RRXi: {
10355     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
10356     MCInst TmpInst;
10357     TmpInst.setOpcode(ARM::MOVsi);
10358     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10359     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10360     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10361     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10362     TmpInst.addOperand(Inst.getOperand(3));
10363     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
10364     Inst = TmpInst;
10365     return true;
10366   }
10367   case ARM::t2LDMIA_UPD: {
10368     // If this is a load of a single register, then we should use
10369     // a post-indexed LDR instruction instead, per the ARM ARM.
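    // For example (illustrative), "ldmia r1!, {r0}" becomes "ldr r0, [r1], #4".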
10370     if (Inst.getNumOperands() != 5)
10371       return false;
10372     MCInst TmpInst;
10373     TmpInst.setOpcode(ARM::t2LDR_POST);
10374     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10375     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10376     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10377     TmpInst.addOperand(MCOperand::createImm(4));
10378     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10379     TmpInst.addOperand(Inst.getOperand(3));
10380     Inst = TmpInst;
10381     return true;
10382   }
10383   case ARM::t2STMDB_UPD: {
10384     // If this is a store of a single register, then we should use
10385     // a pre-indexed STR instruction instead, per the ARM ARM.
10386     if (Inst.getNumOperands() != 5)
10387       return false;
10388     MCInst TmpInst;
10389     TmpInst.setOpcode(ARM::t2STR_PRE);
10390     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10391     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10392     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10393     TmpInst.addOperand(MCOperand::createImm(-4));
10394     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10395     TmpInst.addOperand(Inst.getOperand(3));
10396     Inst = TmpInst;
10397     return true;
10398   }
10399   case ARM::LDMIA_UPD:
10400     // If this is a load of a single register via a 'pop', then we should use
10401     // a post-indexed LDR instruction instead, per the ARM ARM.
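    // For example (illustrative), "pop {r0}" becomes "ldr r0, [sp], #4".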
10402     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10403         Inst.getNumOperands() == 5) {
10404       MCInst TmpInst;
10405       TmpInst.setOpcode(ARM::LDR_POST_IMM);
10406       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10407       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10408       TmpInst.addOperand(Inst.getOperand(1)); // Rn
10409       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
10410       TmpInst.addOperand(MCOperand::createImm(4));
10411       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10412       TmpInst.addOperand(Inst.getOperand(3));
10413       Inst = TmpInst;
10414       return true;
10415     }
10416     break;
10417   case ARM::STMDB_UPD:
10418     // If this is a store of a single register via a 'push', then we should use
10419     // a pre-indexed STR instruction instead, per the ARM ARM.
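    // For example (illustrative), "push {r0}" becomes "str r0, [sp, #-4]!".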
10420     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10421         Inst.getNumOperands() == 5) {
10422       MCInst TmpInst;
10423       TmpInst.setOpcode(ARM::STR_PRE_IMM);
10424       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10425       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10426       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10427       TmpInst.addOperand(MCOperand::createImm(-4));
10428       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10429       TmpInst.addOperand(Inst.getOperand(3));
10430       Inst = TmpInst;
10431     }
10432     break;
10433   case ARM::t2ADDri12:
10434   case ARM::t2SUBri12:
10435   case ARM::t2ADDspImm12:
10436   case ARM::t2SUBspImm12: {
10437     // If the immediate fits for encoding T3 and the generic
10438     // mnemonic was used, encoding T3 is preferred.
10439     const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10440     if ((Token != "add" && Token != "sub") ||
10441         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10442       break;
10443     switch (Inst.getOpcode()) {
10444     case ARM::t2ADDri12:
10445       Inst.setOpcode(ARM::t2ADDri);
10446       break;
10447     case ARM::t2SUBri12:
10448       Inst.setOpcode(ARM::t2SUBri);
10449       break;
10450     case ARM::t2ADDspImm12:
10451       Inst.setOpcode(ARM::t2ADDspImm);
10452       break;
10453     case ARM::t2SUBspImm12:
10454       Inst.setOpcode(ARM::t2SUBspImm);
10455       break;
10456     }
10457 
10458     Inst.addOperand(MCOperand::createReg(0)); // cc_out
10459     return true;
10460   }
10461   case ARM::tADDi8:
10462     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10463     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10464     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10465     // to encoding T1 if <Rd> is omitted."
10466     if (Inst.getOperand(3).isImm() &&
10467         (unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10468       Inst.setOpcode(ARM::tADDi3);
10469       return true;
10470     }
10471     break;
10472   case ARM::tSUBi8:
10473     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
10474     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10475     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10476     // to encoding T1 if <Rd> is omitted."
10477     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10478       Inst.setOpcode(ARM::tSUBi3);
10479       return true;
10480     }
10481     break;
10482   case ARM::t2ADDri:
10483   case ARM::t2SUBri: {
10484     // If the destination and first source operand are the same, and
10485     // the flags are compatible with the current IT status, use encoding T2
10486     // instead of T3. For compatibility with the system 'as'. Make sure the
10487     // wide encoding wasn't explicit.
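    // For example (illustrative), outside an IT block "adds r0, r0, #1" can
    // use the 16-bit tADDi8 encoding instead of the 32-bit T3 encoding.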
10488     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
10489         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
10490         (Inst.getOperand(2).isImm() &&
10491          (unsigned)Inst.getOperand(2).getImm() > 255) ||
10492         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
10493         HasWideQualifier)
10494       break;
10495     MCInst TmpInst;
10496     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10497                       ARM::tADDi8 : ARM::tSUBi8);
10498     TmpInst.addOperand(Inst.getOperand(0));
10499     TmpInst.addOperand(Inst.getOperand(5));
10500     TmpInst.addOperand(Inst.getOperand(0));
10501     TmpInst.addOperand(Inst.getOperand(2));
10502     TmpInst.addOperand(Inst.getOperand(3));
10503     TmpInst.addOperand(Inst.getOperand(4));
10504     Inst = TmpInst;
10505     return true;
10506   }
10507   case ARM::t2ADDspImm:
10508   case ARM::t2SUBspImm: {
10509     // Prefer T1 encoding if possible
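    // The T1 encodings (tADDspi/tSUBspi) take a 7-bit immediate scaled by 4,
    // so the offset must be a multiple of 4 no larger than 508.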
10510     if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier)
10511       break;
10512     unsigned V = Inst.getOperand(2).getImm();
10513     if (V & 3 || V > ((1 << 7) - 1) << 2)
10514       break;
10515     MCInst TmpInst;
10516     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10517                                                           : ARM::tSUBspi);
10518     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10519     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10520     TmpInst.addOperand(MCOperand::createImm(V / 4));   // immediate
10521     TmpInst.addOperand(Inst.getOperand(3));            // pred
10522     TmpInst.addOperand(Inst.getOperand(4));
10523     Inst = TmpInst;
10524     return true;
10525   }
10526   case ARM::t2ADDrr: {
10527     // If the destination and first source operand are the same, and
10528     // there's no setting of the flags, use encoding T2 instead of T3.
10529     // Note that this is only for ADD, not SUB. This mirrors the system
10530     // 'as' behaviour.  Also take advantage of ADD being commutative.
10531     // Make sure the wide encoding wasn't explicit.
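    // For example (illustrative), "add r0, r0, r8" and, by commutativity,
    // "add r0, r8, r0" can both use the 16-bit high-register add (tADDhirr)
    // when no flags are set and ".w" was not requested.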
10532     bool Swap = false;
10533     auto DestReg = Inst.getOperand(0).getReg();
10534     bool Transform = DestReg == Inst.getOperand(1).getReg();
10535     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10536       Transform = true;
10537       Swap = true;
10538     }
10539     if (!Transform ||
10540         Inst.getOperand(5).getReg() != 0 ||
10541         HasWideQualifier)
10542       break;
10543     MCInst TmpInst;
10544     TmpInst.setOpcode(ARM::tADDhirr);
10545     TmpInst.addOperand(Inst.getOperand(0));
10546     TmpInst.addOperand(Inst.getOperand(0));
10547     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10548     TmpInst.addOperand(Inst.getOperand(3));
10549     TmpInst.addOperand(Inst.getOperand(4));
10550     Inst = TmpInst;
10551     return true;
10552   }
10553   case ARM::tADDrSP:
10554     // If the non-SP source operand and the destination operand are not the
10555     // same, we need to use the 32-bit encoding if it's available.
10556     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10557       Inst.setOpcode(ARM::t2ADDrr);
10558       Inst.addOperand(MCOperand::createReg(0)); // cc_out
10559       return true;
10560     }
10561     break;
10562   case ARM::tB:
10563     // A Thumb conditional branch outside of an IT block is a tBcc.
10564     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10565       Inst.setOpcode(ARM::tBcc);
10566       return true;
10567     }
10568     break;
10569   case ARM::t2B:
10570     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10571     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10572       Inst.setOpcode(ARM::t2Bcc);
10573       return true;
10574     }
10575     break;
10576   case ARM::t2Bcc:
10577     // If the conditional is AL or we're in an IT block, we really want t2B.
10578     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10579       Inst.setOpcode(ARM::t2B);
10580       return true;
10581     }
10582     break;
10583   case ARM::tBcc:
10584     // If the conditional is AL, we really want tB.
10585     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10586       Inst.setOpcode(ARM::tB);
10587       return true;
10588     }
10589     break;
10590   case ARM::tLDMIA: {
10591     // If the register list contains any high registers, or if the writeback
10592     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10593     // instead if we're in Thumb2. Otherwise, this should have generated
10594     // an error in validateInstruction().
10595     unsigned Rn = Inst.getOperand(0).getReg();
10596     bool hasWritebackToken =
10597         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
10598          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
10599     bool listContainsBase;
10600     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
10601         (!listContainsBase && !hasWritebackToken) ||
10602         (listContainsBase && hasWritebackToken)) {
10603       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10604       assert(isThumbTwo());
10605       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10606       // If we're switching to the updating version, we need to insert
10607       // the writeback tied operand.
10608       if (hasWritebackToken)
10609         Inst.insert(Inst.begin(),
10610                     MCOperand::createReg(Inst.getOperand(0).getReg()));
10611       return true;
10612     }
10613     break;
10614   }
10615   case ARM::tSTMIA_UPD: {
10616     // If the register list contains any high registers, we need to use
10617     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10618     // should have generated an error in validateInstruction().
10619     unsigned Rn = Inst.getOperand(0).getReg();
10620     bool listContainsBase;
10621     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
10622       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10623       assert(isThumbTwo());
10624       Inst.setOpcode(ARM::t2STMIA_UPD);
10625       return true;
10626     }
10627     break;
10628   }
10629   case ARM::tPOP: {
10630     bool listContainsBase;
10631     // If the register list contains any high registers, we need to use
10632     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10633     // should have generated an error in validateInstruction().
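    // For example (illustrative), in Thumb2 "pop {r0, r8}" cannot use the
    // 16-bit tPOP encoding (r8 is a high register), so it is rewritten below
    // as the equivalent of "ldmia sp!, {r0, r8}".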
10634     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
10635       return false;
10636     assert(isThumbTwo());
10637     Inst.setOpcode(ARM::t2LDMIA_UPD);
10638     // Add the base register and writeback operands.
10639     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10640     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10641     return true;
10642   }
10643   case ARM::tPUSH: {
10644     bool listContainsBase;
10645     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
10646       return false;
10647     assert(isThumbTwo());
10648     Inst.setOpcode(ARM::t2STMDB_UPD);
10649     // Add the base register and writeback operands.
10650     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10651     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10652     return true;
10653   }
10654   case ARM::t2MOVi:
10655     // If we can use the 16-bit encoding and the user didn't explicitly
10656     // request the 32-bit variant, transform it here.
10657     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10658         (Inst.getOperand(1).isImm() &&
10659          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10660         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10661         !HasWideQualifier) {
10662       // The operands aren't in the same order for tMOVi8...
10663       MCInst TmpInst;
10664       TmpInst.setOpcode(ARM::tMOVi8);
10665       TmpInst.addOperand(Inst.getOperand(0));
10666       TmpInst.addOperand(Inst.getOperand(4));
10667       TmpInst.addOperand(Inst.getOperand(1));
10668       TmpInst.addOperand(Inst.getOperand(2));
10669       TmpInst.addOperand(Inst.getOperand(3));
10670       Inst = TmpInst;
10671       return true;
10672     }
10673     break;
10674 
10675   case ARM::t2MOVr:
10676     // If we can use the 16-bit encoding and the user didn't explicitly
10677     // request the 32-bit variant, transform it here.
10678     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10679         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10680         Inst.getOperand(2).getImm() == ARMCC::AL &&
10681         Inst.getOperand(4).getReg() == ARM::CPSR &&
10682         !HasWideQualifier) {
10683       // The operands aren't the same for tMOV[S]r... (no cc_out)
10684       MCInst TmpInst;
10685       unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10686       TmpInst.setOpcode(Op);
10687       TmpInst.addOperand(Inst.getOperand(0));
10688       TmpInst.addOperand(Inst.getOperand(1));
10689       if (Op == ARM::tMOVr) {
10690         TmpInst.addOperand(Inst.getOperand(2));
10691         TmpInst.addOperand(Inst.getOperand(3));
10692       }
10693       Inst = TmpInst;
10694       return true;
10695     }
10696     break;
10697 
10698   case ARM::t2SXTH:
10699   case ARM::t2SXTB:
10700   case ARM::t2UXTH:
10701   case ARM::t2UXTB:
10702     // If we can use the 16-bit encoding and the user didn't explicitly
10703     // request the 32-bit variant, transform it here.
10704     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10705         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10706         Inst.getOperand(2).getImm() == 0 &&
10707         !HasWideQualifier) {
10708       unsigned NewOpc;
10709       switch (Inst.getOpcode()) {
10710       default: llvm_unreachable("Illegal opcode!");
10711       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10712       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10713       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10714       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10715       }
10716       // The operands aren't the same for thumb1 (no rotate operand).
10717       MCInst TmpInst;
10718       TmpInst.setOpcode(NewOpc);
10719       TmpInst.addOperand(Inst.getOperand(0));
10720       TmpInst.addOperand(Inst.getOperand(1));
10721       TmpInst.addOperand(Inst.getOperand(3));
10722       TmpInst.addOperand(Inst.getOperand(4));
10723       Inst = TmpInst;
10724       return true;
10725     }
10726     break;
10727 
10728   case ARM::MOVsi: {
10729     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10730     // rrx shifts and asr/lsr of #32 are encoded as 0
10731     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10732       return false;
10733     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10734       // Shifting by zero is accepted as a vanilla 'MOVr'
10735       MCInst TmpInst;
10736       TmpInst.setOpcode(ARM::MOVr);
10737       TmpInst.addOperand(Inst.getOperand(0));
10738       TmpInst.addOperand(Inst.getOperand(1));
10739       TmpInst.addOperand(Inst.getOperand(3));
10740       TmpInst.addOperand(Inst.getOperand(4));
10741       TmpInst.addOperand(Inst.getOperand(5));
10742       Inst = TmpInst;
10743       return true;
10744     }
10745     return false;
10746   }
10747   case ARM::ANDrsi:
10748   case ARM::ORRrsi:
10749   case ARM::EORrsi:
10750   case ARM::BICrsi:
10751   case ARM::SUBrsi:
10752   case ARM::ADDrsi: {
10753     unsigned newOpc;
10754     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
10755     if (SOpc == ARM_AM::rrx) return false;
10756     switch (Inst.getOpcode()) {
10757     default: llvm_unreachable("unexpected opcode!");
10758     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
10759     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
10760     case ARM::EORrsi: newOpc = ARM::EORrr; break;
10761     case ARM::BICrsi: newOpc = ARM::BICrr; break;
10762     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
10763     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
10764     }
10765     // If the shift is by zero, use the non-shifted instruction definition.
10766     // The exception is for right shifts, where 0 == 32
10767     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
10768         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
10769       MCInst TmpInst;
10770       TmpInst.setOpcode(newOpc);
10771       TmpInst.addOperand(Inst.getOperand(0));
10772       TmpInst.addOperand(Inst.getOperand(1));
10773       TmpInst.addOperand(Inst.getOperand(2));
10774       TmpInst.addOperand(Inst.getOperand(4));
10775       TmpInst.addOperand(Inst.getOperand(5));
10776       TmpInst.addOperand(Inst.getOperand(6));
10777       Inst = TmpInst;
10778       return true;
10779     }
10780     return false;
10781   }
10782   case ARM::ITasm:
10783   case ARM::t2IT: {
10784     // Set up the IT block state according to the IT instruction we just
10785     // matched.
10786     assert(!inITBlock() && "nested IT blocks?!");
10787     startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
10788                          Inst.getOperand(1).getImm());
10789     break;
10790   }
10791   case ARM::t2LSLrr:
10792   case ARM::t2LSRrr:
10793   case ARM::t2ASRrr:
10794   case ARM::t2SBCrr:
10795   case ARM::t2RORrr:
10796   case ARM::t2BICrr:
10797     // Assemblers should use the narrow encodings of these instructions when permissible.
10798     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10799          isARMLowRegister(Inst.getOperand(2).getReg())) &&
10800         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10801         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10802         !HasWideQualifier) {
10803       unsigned NewOpc;
10804       switch (Inst.getOpcode()) {
10805         default: llvm_unreachable("unexpected opcode");
10806         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
10807         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
10808         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
10809         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
10810         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
10811         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
10812       }
10813       MCInst TmpInst;
10814       TmpInst.setOpcode(NewOpc);
10815       TmpInst.addOperand(Inst.getOperand(0));
10816       TmpInst.addOperand(Inst.getOperand(5));
10817       TmpInst.addOperand(Inst.getOperand(1));
10818       TmpInst.addOperand(Inst.getOperand(2));
10819       TmpInst.addOperand(Inst.getOperand(3));
10820       TmpInst.addOperand(Inst.getOperand(4));
10821       Inst = TmpInst;
10822       return true;
10823     }
10824     return false;
10825 
10826   case ARM::t2ANDrr:
10827   case ARM::t2EORrr:
10828   case ARM::t2ADCrr:
10829   case ARM::t2ORRrr:
10830     // Assemblers should use the narrow encodings of these instructions when permissible.
10831     // These instructions are special in that they are commutable, so shorter encodings
10832     // are available more often.
10833     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10834          isARMLowRegister(Inst.getOperand(2).getReg())) &&
10835         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
10836          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
10837         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10838         !HasWideQualifier) {
10839       unsigned NewOpc;
10840       switch (Inst.getOpcode()) {
10841         default: llvm_unreachable("unexpected opcode");
10842         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
10843         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
10844         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
10845         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
10846       }
10847       MCInst TmpInst;
10848       TmpInst.setOpcode(NewOpc);
10849       TmpInst.addOperand(Inst.getOperand(0));
10850       TmpInst.addOperand(Inst.getOperand(5));
10851       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
10852         TmpInst.addOperand(Inst.getOperand(1));
10853         TmpInst.addOperand(Inst.getOperand(2));
10854       } else {
10855         TmpInst.addOperand(Inst.getOperand(2));
10856         TmpInst.addOperand(Inst.getOperand(1));
10857       }
10858       TmpInst.addOperand(Inst.getOperand(3));
10859       TmpInst.addOperand(Inst.getOperand(4));
10860       Inst = TmpInst;
10861       return true;
10862     }
10863     return false;
10864   case ARM::MVE_VPST:
10865   case ARM::MVE_VPTv16i8:
10866   case ARM::MVE_VPTv8i16:
10867   case ARM::MVE_VPTv4i32:
10868   case ARM::MVE_VPTv16u8:
10869   case ARM::MVE_VPTv8u16:
10870   case ARM::MVE_VPTv4u32:
10871   case ARM::MVE_VPTv16s8:
10872   case ARM::MVE_VPTv8s16:
10873   case ARM::MVE_VPTv4s32:
10874   case ARM::MVE_VPTv4f32:
10875   case ARM::MVE_VPTv8f16:
10876   case ARM::MVE_VPTv16i8r:
10877   case ARM::MVE_VPTv8i16r:
10878   case ARM::MVE_VPTv4i32r:
10879   case ARM::MVE_VPTv16u8r:
10880   case ARM::MVE_VPTv8u16r:
10881   case ARM::MVE_VPTv4u32r:
10882   case ARM::MVE_VPTv16s8r:
10883   case ARM::MVE_VPTv8s16r:
10884   case ARM::MVE_VPTv4s32r:
10885   case ARM::MVE_VPTv4f32r:
10886   case ARM::MVE_VPTv8f16r: {
10887     assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
10888     MCOperand &MO = Inst.getOperand(0);
10889     VPTState.Mask = MO.getImm();
10890     VPTState.CurPosition = 0;
10891     break;
10892   }
10893   }
10894   return false;
10895 }
10896 
10897 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
10898   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
10899   // suffix depending on whether they're in an IT block or not.
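  // For example (illustrative), the 16-bit "adds r0, r0, #1" form is what is
  // accepted outside an IT block, while the non-flag-setting 16-bit
  // "add r0, r0, #1" form is only accepted inside one on Thumb2; Thumb1
  // always requires the flag-setting form.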
10900   unsigned Opc = Inst.getOpcode();
10901   const MCInstrDesc &MCID = MII.get(Opc);
10902   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
10903     assert(MCID.hasOptionalDef() &&
10904            "optionally flag setting instruction missing optional def operand");
10905     assert(MCID.NumOperands == Inst.getNumOperands() &&
10906            "operand count mismatch!");
10907     // Find the optional-def operand (cc_out).
10908     unsigned OpNo;
10909     for (OpNo = 0;
10910          OpNo < MCID.NumOperands && !MCID.operands()[OpNo].isOptionalDef();
10911          ++OpNo)
10912       ;
10913     // If we're parsing Thumb1, reject it completely.
10914     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
10915       return Match_RequiresFlagSetting;
10916     // If we're parsing Thumb2, which form is legal depends on whether we're
10917     // in an IT block.
10918     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
10919         !inITBlock())
10920       return Match_RequiresITBlock;
10921     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
10922         inITBlock())
10923       return Match_RequiresNotITBlock;
10924     // LSL with zero immediate is not allowed in an IT block
10925     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
10926       return Match_RequiresNotITBlock;
10927   } else if (isThumbOne()) {
10928     // Some high-register supporting Thumb1 encodings only allow both registers
10929     // to be from r0-r7 when in Thumb2.
10930     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
10931         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10932         isARMLowRegister(Inst.getOperand(2).getReg()))
10933       return Match_RequiresThumb2;
10934     // Others only require ARMv6 or later.
10935     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
10936              isARMLowRegister(Inst.getOperand(0).getReg()) &&
10937              isARMLowRegister(Inst.getOperand(1).getReg()))
10938       return Match_RequiresV6;
10939   }
10940 
10941   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
10942   // than the loop below can handle, so it uses the GPRnopc register class and
10943   // we do SP handling here.
10944   if (Opc == ARM::t2MOVr && !hasV8Ops())
10945   {
10946     // SP as both source and destination is not allowed
10947     if (Inst.getOperand(0).getReg() == ARM::SP &&
10948         Inst.getOperand(1).getReg() == ARM::SP)
10949       return Match_RequiresV8;
10950     // When flags-setting SP as either source or destination is not allowed
10951     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
10952         (Inst.getOperand(0).getReg() == ARM::SP ||
10953          Inst.getOperand(1).getReg() == ARM::SP))
10954       return Match_RequiresV8;
10955   }
10956 
10957   switch (Inst.getOpcode()) {
10958   case ARM::VMRS:
10959   case ARM::VMSR:
10960   case ARM::VMRS_FPCXTS:
10961   case ARM::VMRS_FPCXTNS:
10962   case ARM::VMSR_FPCXTS:
10963   case ARM::VMSR_FPCXTNS:
10964   case ARM::VMRS_FPSCR_NZCVQC:
10965   case ARM::VMSR_FPSCR_NZCVQC:
10966   case ARM::FMSTAT:
10967   case ARM::VMRS_VPR:
10968   case ARM::VMRS_P0:
10969   case ARM::VMSR_VPR:
10970   case ARM::VMSR_P0:
10971     // Use of SP for VMRS/VMSR is only allowed in ARM mode; on ARMv8-A it is
10972     // also permitted in Thumb mode.
10973     if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
10974         (isThumb() && !hasV8Ops()))
10975       return Match_InvalidOperand;
10976     break;
10977   case ARM::t2TBB:
10978   case ARM::t2TBH:
10979     // Rn = sp is only allowed with ARMv8-A
10980     if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
10981       return Match_RequiresV8;
10982     break;
10983   default:
10984     break;
10985   }
10986 
10987   for (unsigned I = 0; I < MCID.NumOperands; ++I)
10988     if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
10989       // rGPRRegClass excludes PC, and also excludes SP before ARMv8
10990       const auto &Op = Inst.getOperand(I);
10991       if (!Op.isReg()) {
10992         // This can happen in awkward cases with tied operands, e.g. a
10993         // writeback load/store with a complex addressing mode in
10994         // which there's an output operand corresponding to the
10995         // updated written-back base register: the Tablegen-generated
10996         // AsmMatcher will have written a placeholder operand to that
10997         // slot in the form of an immediate 0, because it can't
10998         // generate the register part of the complex addressing-mode
10999         // operand ahead of time.
11000         continue;
11001       }
11002 
11003       unsigned Reg = Op.getReg();
11004       if ((Reg == ARM::SP) && !hasV8Ops())
11005         return Match_RequiresV8;
11006       else if (Reg == ARM::PC)
11007         return Match_InvalidOperand;
11008     }
11009 
11010   return Match_Success;
11011 }
11012 
11013 namespace llvm {
11014 
11015 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
11016   return true; // In an assembly source, no need to second-guess
11017 }
11018 
11019 } // end namespace llvm
11020 
11021 // Returns true if Inst is unpredictable if it is in an IT block, but is not
11022 // the last instruction in the block.
11023 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
11024   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11025 
11026   // All branch & call instructions terminate IT blocks with the exception of
11027   // SVC.
11028   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
11029       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
11030     return true;
11031 
11032   // Any arithmetic instruction which writes to the PC also terminates the IT
11033   // block.
11034   if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
11035     return true;
11036 
11037   return false;
11038 }
11039 
11040 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
11041                                           SmallVectorImpl<NearMissInfo> &NearMisses,
11042                                           bool MatchingInlineAsm,
11043                                           bool &EmitInITBlock,
11044                                           MCStreamer &Out) {
11045   // If we can't use an implicit IT block here, just match as normal.
11046   if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11047     return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11048 
11049   // Try to match the instruction in an extension of the current IT block (if
11050   // there is one).
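  // For example (illustrative), after an implicit IT block has been opened
  // for "addeq", a following "subne" can still be folded into it by
  // inverting the block's current condition (IT -> ITE, and so on).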
11051   if (inImplicitITBlock()) {
11052     extendImplicitITBlock(ITState.Cond);
11053     if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11054             Match_Success) {
11055       // The match succeeded, but we still have to check that the instruction is
11056       // valid in this implicit IT block.
11057       const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11058       if (MCID.isPredicable()) {
11059         ARMCC::CondCodes InstCond =
11060             (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11061                 .getImm();
11062         ARMCC::CondCodes ITCond = currentITCond();
11063         if (InstCond == ITCond) {
11064           EmitInITBlock = true;
11065           return Match_Success;
11066         } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
11067           invertCurrentITCondition();
11068           EmitInITBlock = true;
11069           return Match_Success;
11070         }
11071       }
11072     }
11073     rewindImplicitITPosition();
11074   }
11075 
11076   // Finish the current IT block, and try to match outside any IT block.
11077   flushPendingInstructions(Out);
11078   unsigned PlainMatchResult =
11079       MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11080   if (PlainMatchResult == Match_Success) {
11081     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11082     if (MCID.isPredicable()) {
11083       ARMCC::CondCodes InstCond =
11084           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11085               .getImm();
11086       // Some forms of the branch instruction have their own condition code
11087       // fields, so can be conditionally executed without an IT block.
11088       if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
11089         EmitInITBlock = false;
11090         return Match_Success;
11091       }
11092       if (InstCond == ARMCC::AL) {
11093         EmitInITBlock = false;
11094         return Match_Success;
11095       }
11096     } else {
11097       EmitInITBlock = false;
11098       return Match_Success;
11099     }
11100   }
11101 
11102   // Try to match in a new IT block. The matcher doesn't check the actual
11103   // condition, so we create an IT block with a dummy condition, and fix it up
11104   // once we know the actual condition.
11105   startImplicitITBlock();
11106   if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11107       Match_Success) {
11108     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11109     if (MCID.isPredicable()) {
11110       ITState.Cond =
11111           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11112               .getImm();
11113       EmitInITBlock = true;
11114       return Match_Success;
11115     }
11116   }
11117   discardImplicitITBlock();
11118 
11119   // If none of these succeed, return the error we got when trying to match
11120   // outside any IT blocks.
11121   EmitInITBlock = false;
11122   return PlainMatchResult;
11123 }
11124 
11125 static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11126                                          unsigned VariantID = 0);
11127 
11128 static const char *getSubtargetFeatureName(uint64_t Val);
11129 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
11130                                            OperandVector &Operands,
11131                                            MCStreamer &Out, uint64_t &ErrorInfo,
11132                                            bool MatchingInlineAsm) {
11133   MCInst Inst;
11134   unsigned MatchResult;
11135   bool PendConditionalInstruction = false;
11136 
11137   SmallVector<NearMissInfo, 4> NearMisses;
11138   MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
11139                                  PendConditionalInstruction, Out);
11140 
11141   switch (MatchResult) {
11142   case Match_Success:
11143     LLVM_DEBUG(dbgs() << "Parsed as: ";
11144                Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11145                dbgs() << "\n");
11146 
11147     // Context sensitive operand constraints aren't handled by the matcher,
11148     // so check them here.
11149     if (validateInstruction(Inst, Operands)) {
11150       // Still progress the IT block, otherwise one wrong condition causes
11151       // nasty cascading errors.
11152       forwardITPosition();
11153       forwardVPTPosition();
11154       return true;
11155     }
11156 
11157     {
11158       // Some instructions need post-processing to, for example, tweak which
11159       // encoding is selected. Loop on it while changes happen so the
11160       // individual transformations can chain off each other. E.g.,
11161       // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
11162       while (processInstruction(Inst, Operands, Out))
11163         LLVM_DEBUG(dbgs() << "Changed to: ";
11164                    Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11165                    dbgs() << "\n");
11166     }
11167 
11168     // Only move forward at the very end so that everything in validate
11169     // and process gets a consistent answer about whether we're in an IT
11170     // block.
11171     forwardITPosition();
11172     forwardVPTPosition();
11173 
11174     // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
11175     // doesn't actually encode.
11176     if (Inst.getOpcode() == ARM::ITasm)
11177       return false;
11178 
11179     Inst.setLoc(IDLoc);
11180     if (PendConditionalInstruction) {
11181       PendingConditionalInsts.push_back(Inst);
11182       if (isITBlockFull() || isITBlockTerminator(Inst))
11183         flushPendingInstructions(Out);
11184     } else {
11185       Out.emitInstruction(Inst, getSTI());
11186     }
11187     return false;
11188   case Match_NearMisses:
11189     ReportNearMisses(NearMisses, IDLoc, Operands);
11190     return true;
11191   case Match_MnemonicFail: {
11192     FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11193     std::string Suggestion = ARMMnemonicSpellCheck(
11194       ((ARMOperand &)*Operands[0]).getToken(), FBS);
11195     return Error(IDLoc, "invalid instruction" + Suggestion,
11196                  ((ARMOperand &)*Operands[0]).getLocRange());
11197   }
11198   }
11199 
11200   llvm_unreachable("Implement any new match types added!");
11201 }
11202 
11203 /// parseDirective parses the ARM-specific directives
11204 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
11205   const MCContext::Environment Format = getContext().getObjectFileType();
11206   bool IsMachO = Format == MCContext::IsMachO;
11207   bool IsCOFF = Format == MCContext::IsCOFF;
11208 
11209   std::string IDVal = DirectiveID.getIdentifier().lower();
11210   if (IDVal == ".word")
11211     parseLiteralValues(4, DirectiveID.getLoc());
11212   else if (IDVal == ".short" || IDVal == ".hword")
11213     parseLiteralValues(2, DirectiveID.getLoc());
11214   else if (IDVal == ".thumb")
11215     parseDirectiveThumb(DirectiveID.getLoc());
11216   else if (IDVal == ".arm")
11217     parseDirectiveARM(DirectiveID.getLoc());
11218   else if (IDVal == ".thumb_func")
11219     parseDirectiveThumbFunc(DirectiveID.getLoc());
11220   else if (IDVal == ".code")
11221     parseDirectiveCode(DirectiveID.getLoc());
11222   else if (IDVal == ".syntax")
11223     parseDirectiveSyntax(DirectiveID.getLoc());
11224   else if (IDVal == ".unreq")
11225     parseDirectiveUnreq(DirectiveID.getLoc());
11226   else if (IDVal == ".fnend")
11227     parseDirectiveFnEnd(DirectiveID.getLoc());
11228   else if (IDVal == ".cantunwind")
11229     parseDirectiveCantUnwind(DirectiveID.getLoc());
11230   else if (IDVal == ".personality")
11231     parseDirectivePersonality(DirectiveID.getLoc());
11232   else if (IDVal == ".handlerdata")
11233     parseDirectiveHandlerData(DirectiveID.getLoc());
11234   else if (IDVal == ".setfp")
11235     parseDirectiveSetFP(DirectiveID.getLoc());
11236   else if (IDVal == ".pad")
11237     parseDirectivePad(DirectiveID.getLoc());
11238   else if (IDVal == ".save")
11239     parseDirectiveRegSave(DirectiveID.getLoc(), false);
11240   else if (IDVal == ".vsave")
11241     parseDirectiveRegSave(DirectiveID.getLoc(), true);
11242   else if (IDVal == ".ltorg" || IDVal == ".pool")
11243     parseDirectiveLtorg(DirectiveID.getLoc());
11244   else if (IDVal == ".even")
11245     parseDirectiveEven(DirectiveID.getLoc());
11246   else if (IDVal == ".personalityindex")
11247     parseDirectivePersonalityIndex(DirectiveID.getLoc());
11248   else if (IDVal == ".unwind_raw")
11249     parseDirectiveUnwindRaw(DirectiveID.getLoc());
11250   else if (IDVal == ".movsp")
11251     parseDirectiveMovSP(DirectiveID.getLoc());
11252   else if (IDVal == ".arch_extension")
11253     parseDirectiveArchExtension(DirectiveID.getLoc());
11254   else if (IDVal == ".align")
11255     return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
11256   else if (IDVal == ".thumb_set")
11257     parseDirectiveThumbSet(DirectiveID.getLoc());
11258   else if (IDVal == ".inst")
11259     parseDirectiveInst(DirectiveID.getLoc());
11260   else if (IDVal == ".inst.n")
11261     parseDirectiveInst(DirectiveID.getLoc(), 'n');
11262   else if (IDVal == ".inst.w")
11263     parseDirectiveInst(DirectiveID.getLoc(), 'w');
11264   else if (!IsMachO && !IsCOFF) {
11265     if (IDVal == ".arch")
11266       parseDirectiveArch(DirectiveID.getLoc());
11267     else if (IDVal == ".cpu")
11268       parseDirectiveCPU(DirectiveID.getLoc());
11269     else if (IDVal == ".eabi_attribute")
11270       parseDirectiveEabiAttr(DirectiveID.getLoc());
11271     else if (IDVal == ".fpu")
11272       parseDirectiveFPU(DirectiveID.getLoc());
11273     else if (IDVal == ".fnstart")
11274       parseDirectiveFnStart(DirectiveID.getLoc());
11275     else if (IDVal == ".object_arch")
11276       parseDirectiveObjectArch(DirectiveID.getLoc());
11277     else if (IDVal == ".tlsdescseq")
11278       parseDirectiveTLSDescSeq(DirectiveID.getLoc());
11279     else
11280       return true;
11281   } else if (IsCOFF) {
11282     if (IDVal == ".seh_stackalloc")
11283       parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/false);
11284     else if (IDVal == ".seh_stackalloc_w")
11285       parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/true);
11286     else if (IDVal == ".seh_save_regs")
11287       parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/false);
11288     else if (IDVal == ".seh_save_regs_w")
11289       parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/true);
11290     else if (IDVal == ".seh_save_sp")
11291       parseDirectiveSEHSaveSP(DirectiveID.getLoc());
11292     else if (IDVal == ".seh_save_fregs")
11293       parseDirectiveSEHSaveFRegs(DirectiveID.getLoc());
11294     else if (IDVal == ".seh_save_lr")
11295       parseDirectiveSEHSaveLR(DirectiveID.getLoc());
11296     else if (IDVal == ".seh_endprologue")
11297       parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/false);
11298     else if (IDVal == ".seh_endprologue_fragment")
11299       parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/true);
11300     else if (IDVal == ".seh_nop")
11301       parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/false);
11302     else if (IDVal == ".seh_nop_w")
11303       parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/true);
11304     else if (IDVal == ".seh_startepilogue")
11305       parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/false);
11306     else if (IDVal == ".seh_startepilogue_cond")
11307       parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/true);
11308     else if (IDVal == ".seh_endepilogue")
11309       parseDirectiveSEHEpilogEnd(DirectiveID.getLoc());
11310     else if (IDVal == ".seh_custom")
11311       parseDirectiveSEHCustom(DirectiveID.getLoc());
11312     else
11313       return true;
11314   } else
11315     return true;
11316   return false;
11317 }
11318 
11319 /// parseLiteralValues
11320 ///  ::= .hword expression [, expression]*
11321 ///  ::= .short expression [, expression]*
11322 ///  ::= .word expression [, expression]*
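/// For example (illustrative), ".word 0xdeadbeef, sym + 4" emits two 4-byte
/// values.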
11323 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11324   auto parseOne = [&]() -> bool {
11325     const MCExpr *Value;
11326     if (getParser().parseExpression(Value))
11327       return true;
11328     getParser().getStreamer().emitValue(Value, Size, L);
11329     return false;
11330   };
11331   return (parseMany(parseOne));
11332 }
11333 
11334 /// parseDirectiveThumb
11335 ///  ::= .thumb
11336 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11337   if (parseEOL() || check(!hasThumb(), L, "target does not support Thumb mode"))
11338     return true;
11339 
11340   if (!isThumb())
11341     SwitchMode();
11342 
11343   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11344   getParser().getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
11345   return false;
11346 }
11347 
11348 /// parseDirectiveARM
11349 ///  ::= .arm
11350 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11351   if (parseEOL() || check(!hasARM(), L, "target does not support ARM mode"))
11352     return true;
11353 
11354   if (isThumb())
11355     SwitchMode();
11356   getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11357   getParser().getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
11358   return false;
11359 }
11360 
11361 void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
11362   // We need to flush the current implicit IT block on a label, because it is
11363   // not legal to branch into an IT block.
11364   flushPendingInstructions(getStreamer());
11365 }
11366 
11367 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11368   if (NextSymbolIsThumb) {
11369     getParser().getStreamer().emitThumbFunc(Symbol);
11370     NextSymbolIsThumb = false;
11371   }
11372 }
11373 
11374 /// parseDirectiveThumbFunc
11375 ///  ::= .thumb_func symbol_name
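///
/// On MachO/Darwin the symbol name may follow the directive, while on ELF the
/// directive stands alone and applies to the next label ("foo" is just an
/// illustrative name):
///   .thumb_func _foo      @ Darwin style
///   .thumb_func           @ ELF style; marks the following label
///   foo: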
11376 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11377   MCAsmParser &Parser = getParser();
11378   const auto Format = getContext().getObjectFileType();
11379   bool IsMachO = Format == MCContext::IsMachO;
11380 
11381   // Darwin asm (optionally) has a function name after the .thumb_func directive;
11382   // ELF doesn't.
11383 
11384   if (IsMachO) {
11385     if (Parser.getTok().is(AsmToken::Identifier) ||
11386         Parser.getTok().is(AsmToken::String)) {
11387       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11388           Parser.getTok().getIdentifier());
11389       getParser().getStreamer().emitThumbFunc(Func);
11390       Parser.Lex();
11391       if (parseEOL())
11392         return true;
11393       return false;
11394     }
11395   }
11396 
11397   if (parseEOL())
11398     return true;
11399 
11400   // .thumb_func implies .thumb
11401   if (!isThumb())
11402     SwitchMode();
11403 
11404   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11405 
11406   NextSymbolIsThumb = true;
11407   return false;
11408 }
11409 
11410 /// parseDirectiveSyntax
11411 ///  ::= .syntax unified | divided
11412 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11413   MCAsmParser &Parser = getParser();
11414   const AsmToken &Tok = Parser.getTok();
11415   if (Tok.isNot(AsmToken::Identifier)) {
11416     Error(L, "unexpected token in .syntax directive");
11417     return false;
11418   }
11419 
11420   StringRef Mode = Tok.getString();
11421   Parser.Lex();
11422   if (check(Mode == "divided" || Mode == "DIVIDED", L,
11423             "'.syntax divided' arm assembly not supported") ||
11424       check(Mode != "unified" && Mode != "UNIFIED", L,
11425             "unrecognized syntax mode in .syntax directive") ||
11426       parseEOL())
11427     return true;
11428 
11429   // TODO tell the MC streamer the mode
11430   // getParser().getStreamer().Emit???();
11431   return false;
11432 }
11433 
11434 /// parseDirectiveCode
11435 ///  ::= .code 16 | 32
11436 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11437   MCAsmParser &Parser = getParser();
11438   const AsmToken &Tok = Parser.getTok();
11439   if (Tok.isNot(AsmToken::Integer))
11440     return Error(L, "unexpected token in .code directive");
11441   int64_t Val = Parser.getTok().getIntVal();
11442   if (Val != 16 && Val != 32) {
11443     Error(L, "invalid operand to .code directive");
11444     return false;
11445   }
11446   Parser.Lex();
11447 
11448   if (parseEOL())
11449     return true;
11450 
11451   if (Val == 16) {
11452     if (!hasThumb())
11453       return Error(L, "target does not support Thumb mode");
11454 
11455     if (!isThumb())
11456       SwitchMode();
11457     getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11458   } else {
11459     if (!hasARM())
11460       return Error(L, "target does not support ARM mode");
11461 
11462     if (isThumb())
11463       SwitchMode();
11464     getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11465   }
11466 
11467   return false;
11468 }
11469 
11470 /// parseDirectiveReq
11471 ///  ::= name .req registername
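///
/// For example, the following makes "acc" an alias for r4 (the alias name is
/// illustrative):
///   acc .req r4
///   add acc, acc, r1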
11472 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11473   MCAsmParser &Parser = getParser();
11474   Parser.Lex(); // Eat the '.req' token.
11475   MCRegister Reg;
11476   SMLoc SRegLoc, ERegLoc;
11477   if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11478             "register name expected") ||
11479       parseEOL())
11480     return true;
11481 
11482   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
11483     return Error(SRegLoc,
11484                  "redefinition of '" + Name + "' does not match original.");
11485 
11486   return false;
11487 }
11488 
11489 /// parseDirectiveUnreq
11490 ///  ::= .unreq registername
11491 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11492   MCAsmParser &Parser = getParser();
11493   if (Parser.getTok().isNot(AsmToken::Identifier))
11494     return Error(L, "unexpected input in .unreq directive.");
11495   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11496   Parser.Lex(); // Eat the identifier.
11497   return parseEOL();
11498 }
11499 
11500 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
11501 // before, if supported by the new target, or emit mapping symbols for the mode
11502 // switch.
11503 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
11504   if (WasThumb != isThumb()) {
11505     if (WasThumb && hasThumb()) {
11506       // Stay in Thumb mode
11507       SwitchMode();
11508     } else if (!WasThumb && hasARM()) {
11509       // Stay in ARM mode
11510       SwitchMode();
11511     } else {
11512       // Mode switch forced, because the new arch doesn't support the old mode.
11513       getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
11514                                                             : MCAF_Code32);
11515       // Warn about the implicit mode switch. GAS does not switch modes here,
11516       // but instead stays in the old mode, reporting an error on any following
11517       // instructions as the mode does not exist on the target.
11518       Warning(Loc, Twine("new target does not support ") +
11519                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
11520                        (!WasThumb ? "thumb" : "arm") + " mode");
11521     }
11522   }
11523 }
11524 
11525 /// parseDirectiveArch
11526 ///  ::= .arch token
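///
/// The token must be an architecture name accepted by ARM::parseArch, e.g.:
///   .arch armv7-a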
11527 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11528   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11529   ARM::ArchKind ID = ARM::parseArch(Arch);
11530 
11531   if (ID == ARM::ArchKind::INVALID)
11532     return Error(L, "Unknown arch name");
11533 
11534   bool WasThumb = isThumb();
11535   Triple T;
11536   MCSubtargetInfo &STI = copySTI();
11537   STI.setDefaultFeatures("", /*TuneCPU*/ "",
11538                          ("+" + ARM::getArchName(ID)).str());
11539   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11540   FixModeAfterArchChange(WasThumb, L);
11541 
11542   getTargetStreamer().emitArch(ID);
11543   return false;
11544 }
11545 
11546 /// parseDirectiveEabiAttr
11547 ///  ::= .eabi_attribute int, int [, "str"]
11548 ///  ::= .eabi_attribute Tag_name, int [, "str"]
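///
/// For example (tag 24, an integer-valued build attribute, is illustrative;
/// string-valued tags take a quoted string instead):
///   .eabi_attribute 24, 1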
11549 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
11550   MCAsmParser &Parser = getParser();
11551   int64_t Tag;
11552   SMLoc TagLoc;
11553   TagLoc = Parser.getTok().getLoc();
11554   if (Parser.getTok().is(AsmToken::Identifier)) {
11555     StringRef Name = Parser.getTok().getIdentifier();
11556     std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
11557         Name, ARMBuildAttrs::getARMAttributeTags());
11558     if (!Ret) {
11559       Error(TagLoc, "attribute name not recognised: " + Name);
11560       return false;
11561     }
11562     Tag = *Ret;
11563     Parser.Lex();
11564   } else {
11565     const MCExpr *AttrExpr;
11566 
11567     TagLoc = Parser.getTok().getLoc();
11568     if (Parser.parseExpression(AttrExpr))
11569       return true;
11570 
11571     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
11572     if (check(!CE, TagLoc, "expected numeric constant"))
11573       return true;
11574 
11575     Tag = CE->getValue();
11576   }
11577 
11578   if (Parser.parseComma())
11579     return true;
11580 
11581   StringRef StringValue = "";
11582   bool IsStringValue = false;
11583 
11584   int64_t IntegerValue = 0;
11585   bool IsIntegerValue = false;
11586 
11587   if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
11588     IsStringValue = true;
11589   else if (Tag == ARMBuildAttrs::compatibility) {
11590     IsStringValue = true;
11591     IsIntegerValue = true;
11592   } else if (Tag < 32 || Tag % 2 == 0)
11593     IsIntegerValue = true;
11594   else if (Tag % 2 == 1)
11595     IsStringValue = true;
11596   else
11597     llvm_unreachable("invalid tag type");
11598 
11599   if (IsIntegerValue) {
11600     const MCExpr *ValueExpr;
11601     SMLoc ValueExprLoc = Parser.getTok().getLoc();
11602     if (Parser.parseExpression(ValueExpr))
11603       return true;
11604 
11605     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
11606     if (!CE)
11607       return Error(ValueExprLoc, "expected numeric constant");
11608     IntegerValue = CE->getValue();
11609   }
11610 
11611   if (Tag == ARMBuildAttrs::compatibility) {
11612     if (Parser.parseComma())
11613       return true;
11614   }
11615 
11616   std::string EscapedValue;
11617   if (IsStringValue) {
11618     if (Parser.getTok().isNot(AsmToken::String))
11619       return Error(Parser.getTok().getLoc(), "bad string constant");
11620 
11621     if (Tag == ARMBuildAttrs::also_compatible_with) {
11622       if (Parser.parseEscapedString(EscapedValue))
11623         return Error(Parser.getTok().getLoc(), "bad escaped string constant");
11624 
11625       StringValue = EscapedValue;
11626     } else {
11627       StringValue = Parser.getTok().getStringContents();
11628       Parser.Lex();
11629     }
11630   }
11631 
11632   if (Parser.parseEOL())
11633     return true;
11634 
11635   if (IsIntegerValue && IsStringValue) {
11636     assert(Tag == ARMBuildAttrs::compatibility);
11637     getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
11638   } else if (IsIntegerValue)
11639     getTargetStreamer().emitAttribute(Tag, IntegerValue);
11640   else if (IsStringValue)
11641     getTargetStreamer().emitTextAttribute(Tag, StringValue);
11642   return false;
11643 }
11644 
11645 /// parseDirectiveCPU
11646 ///  ::= .cpu str
11647 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11648   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11649   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
11650 
11651   // FIXME: This is using table-gen data, but should be moved to
11652   // ARMTargetParser once that is table-gen'd.
11653   if (!getSTI().isCPUStringValid(CPU))
11654     return Error(L, "Unknown CPU name");
11655 
11656   bool WasThumb = isThumb();
11657   MCSubtargetInfo &STI = copySTI();
11658   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
11659   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11660   FixModeAfterArchChange(WasThumb, L);
11661 
11662   return false;
11663 }
11664 
11665 /// parseDirectiveFPU
11666 ///  ::= .fpu str
11667 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11668   SMLoc FPUNameLoc = getTok().getLoc();
11669   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11670 
11671   ARM::FPUKind ID = ARM::parseFPU(FPU);
11672   std::vector<StringRef> Features;
11673   if (!ARM::getFPUFeatures(ID, Features))
11674     return Error(FPUNameLoc, "Unknown FPU name");
11675 
11676   MCSubtargetInfo &STI = copySTI();
11677   for (auto Feature : Features)
11678     STI.ApplyFeatureFlag(Feature);
11679   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11680 
11681   getTargetStreamer().emitFPU(ID);
11682   return false;
11683 }
11684 
11685 /// parseDirectiveFnStart
11686 ///  ::= .fnstart
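///
/// .fnstart opens an EHABI unwind region that is closed by a matching .fnend,
/// with the other unwind directives appearing in between, e.g.:
///   .fnstart
///   .save {r4, lr}
///   .pad  #8
///   @ ... function body ...
///   .fnend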
11687 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11688   if (parseEOL())
11689     return true;
11690 
11691   if (UC.hasFnStart()) {
11692     Error(L, ".fnstart starts before the end of previous one");
11693     UC.emitFnStartLocNotes();
11694     return true;
11695   }
11696 
11697   // Reset the unwind directives parser state
11698   UC.reset();
11699 
11700   getTargetStreamer().emitFnStart();
11701 
11702   UC.recordFnStart(L);
11703   return false;
11704 }
11705 
11706 /// parseDirectiveFnEnd
11707 ///  ::= .fnend
11708 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
11709   if (parseEOL())
11710     return true;
11711   // Check the ordering of unwind directives
11712   if (!UC.hasFnStart())
11713     return Error(L, ".fnstart must precede .fnend directive");
11714 
11715   // Reset the unwind directives parser state
11716   getTargetStreamer().emitFnEnd();
11717 
11718   UC.reset();
11719   return false;
11720 }
11721 
11722 /// parseDirectiveCantUnwind
11723 ///  ::= .cantunwind
11724 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
11725   if (parseEOL())
11726     return true;
11727 
11728   UC.recordCantUnwind(L);
11729   // Check the ordering of unwind directives
11730   if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
11731     return true;
11732 
11733   if (UC.hasHandlerData()) {
11734     Error(L, ".cantunwind can't be used with .handlerdata directive");
11735     UC.emitHandlerDataLocNotes();
11736     return true;
11737   }
11738   if (UC.hasPersonality()) {
11739     Error(L, ".cantunwind can't be used with .personality directive");
11740     UC.emitPersonalityLocNotes();
11741     return true;
11742   }
11743 
11744   getTargetStreamer().emitCantUnwind();
11745   return false;
11746 }
11747 
11748 /// parseDirectivePersonality
11749 ///  ::= .personality name
11750 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
11751   MCAsmParser &Parser = getParser();
11752   bool HasExistingPersonality = UC.hasPersonality();
11753 
11754   // Parse the name of the personality routine
11755   if (Parser.getTok().isNot(AsmToken::Identifier))
11756     return Error(L, "unexpected input in .personality directive.");
11757   StringRef Name(Parser.getTok().getIdentifier());
11758   Parser.Lex();
11759 
11760   if (parseEOL())
11761     return true;
11762 
11763   UC.recordPersonality(L);
11764 
11765   // Check the ordering of unwind directives
11766   if (!UC.hasFnStart())
11767     return Error(L, ".fnstart must precede .personality directive");
11768   if (UC.cantUnwind()) {
11769     Error(L, ".personality can't be used with .cantunwind directive");
11770     UC.emitCantUnwindLocNotes();
11771     return true;
11772   }
11773   if (UC.hasHandlerData()) {
11774     Error(L, ".personality must precede .handlerdata directive");
11775     UC.emitHandlerDataLocNotes();
11776     return true;
11777   }
11778   if (HasExistingPersonality) {
11779     Error(L, "multiple personality directives");
11780     UC.emitPersonalityLocNotes();
11781     return true;
11782   }
11783 
11784   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
11785   getTargetStreamer().emitPersonality(PR);
11786   return false;
11787 }
11788 
11789 /// parseDirectiveHandlerData
11790 ///  ::= .handlerdata
11791 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
11792   if (parseEOL())
11793     return true;
11794 
11795   UC.recordHandlerData(L);
11796   // Check the ordering of unwind directives
11797   if (!UC.hasFnStart())
11798     return Error(L, ".fnstart must precede .personality directive");
11799   if (UC.cantUnwind()) {
11800     Error(L, ".handlerdata can't be used with .cantunwind directive");
11801     UC.emitCantUnwindLocNotes();
11802     return true;
11803   }
11804 
11805   getTargetStreamer().emitHandlerData();
11806   return false;
11807 }
11808 
11809 /// parseDirectiveSetFP
11810 ///  ::= .setfp fpreg, spreg [, offset]
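///
/// For example, a prologue instruction "add r7, sp, #8" is described by:
///   .setfp r7, sp, #8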
11811 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
11812   MCAsmParser &Parser = getParser();
11813   // Check the ordering of unwind directives
11814   if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
11815       check(UC.hasHandlerData(), L,
11816             ".setfp must precede .handlerdata directive"))
11817     return true;
11818 
11819   // Parse fpreg
11820   SMLoc FPRegLoc = Parser.getTok().getLoc();
11821   int FPReg = tryParseRegister();
11822 
11823   if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
11824       Parser.parseComma())
11825     return true;
11826 
11827   // Parse spreg
11828   SMLoc SPRegLoc = Parser.getTok().getLoc();
11829   int SPReg = tryParseRegister();
11830   if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
11831       check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
11832             "register should be either $sp or the latest fp register"))
11833     return true;
11834 
11835   // Update the frame pointer register
11836   UC.saveFPReg(FPReg);
11837 
11838   // Parse offset
11839   int64_t Offset = 0;
11840   if (Parser.parseOptionalToken(AsmToken::Comma)) {
11841     if (Parser.getTok().isNot(AsmToken::Hash) &&
11842         Parser.getTok().isNot(AsmToken::Dollar))
11843       return Error(Parser.getTok().getLoc(), "'#' expected");
11844     Parser.Lex(); // skip hash token.
11845 
11846     const MCExpr *OffsetExpr;
11847     SMLoc ExLoc = Parser.getTok().getLoc();
11848     SMLoc EndLoc;
11849     if (getParser().parseExpression(OffsetExpr, EndLoc))
11850       return Error(ExLoc, "malformed setfp offset");
11851     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11852     if (check(!CE, ExLoc, "setfp offset must be an immediate"))
11853       return true;
11854     Offset = CE->getValue();
11855   }
11856 
11857   if (Parser.parseEOL())
11858     return true;
11859 
11860   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
11861                                 static_cast<unsigned>(SPReg), Offset);
11862   return false;
11863 }
11864 
11865 /// parseDirectivePad
11866 ///  ::= .pad offset
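///
/// For example, a prologue instruction "sub sp, sp, #16" is described by:
///   .pad #16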
11867 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
11868   MCAsmParser &Parser = getParser();
11869   // Check the ordering of unwind directives
11870   if (!UC.hasFnStart())
11871     return Error(L, ".fnstart must precede .pad directive");
11872   if (UC.hasHandlerData())
11873     return Error(L, ".pad must precede .handlerdata directive");
11874 
11875   // Parse the offset
11876   if (Parser.getTok().isNot(AsmToken::Hash) &&
11877       Parser.getTok().isNot(AsmToken::Dollar))
11878     return Error(Parser.getTok().getLoc(), "'#' expected");
11879   Parser.Lex(); // skip hash token.
11880 
11881   const MCExpr *OffsetExpr;
11882   SMLoc ExLoc = Parser.getTok().getLoc();
11883   SMLoc EndLoc;
11884   if (getParser().parseExpression(OffsetExpr, EndLoc))
11885     return Error(ExLoc, "malformed pad offset");
11886   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11887   if (!CE)
11888     return Error(ExLoc, "pad offset must be an immediate");
11889 
11890   if (parseEOL())
11891     return true;
11892 
11893   getTargetStreamer().emitPad(CE->getValue());
11894   return false;
11895 }
11896 
11897 /// parseDirectiveRegSave
11898 ///  ::= .save  { registers }
11899 ///  ::= .vsave { registers }
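///
/// For example:
///   .save  {r4-r7, lr}
///   .vsave {d8-d11}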
11900 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
11901   // Check the ordering of unwind directives
11902   if (!UC.hasFnStart())
11903     return Error(L, ".fnstart must precede .save or .vsave directives");
11904   if (UC.hasHandlerData())
11905     return Error(L, ".save or .vsave must precede .handlerdata directive");
11906 
11907   // RAII object to make sure parsed operands are deleted.
11908   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
11909 
11910   // Parse the register list
11911   if (parseRegisterList(Operands, true, true) || parseEOL())
11912     return true;
11913   ARMOperand &Op = (ARMOperand &)*Operands[0];
11914   if (!IsVector && !Op.isRegList())
11915     return Error(L, ".save expects GPR registers");
11916   if (IsVector && !Op.isDPRRegList())
11917     return Error(L, ".vsave expects DPR registers");
11918 
11919   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
11920   return false;
11921 }
11922 
11923 /// parseDirectiveInst
11924 ///  ::= .inst opcode [, ...]
11925 ///  ::= .inst.n opcode [, ...]
11926 ///  ::= .inst.w opcode [, ...]
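///
/// For example, the following emit the 16-bit Thumb NOP and the 32-bit
/// Thumb-2 NOP encodings respectively:
///   .inst.n 0xbf00
///   .inst.w 0xf3af8000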
11927 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
11928   int Width = 4;
11929 
11930   if (isThumb()) {
11931     switch (Suffix) {
11932     case 'n':
11933       Width = 2;
11934       break;
11935     case 'w':
11936       break;
11937     default:
11938       Width = 0;
11939       break;
11940     }
11941   } else {
11942     if (Suffix)
11943       return Error(Loc, "width suffixes are invalid in ARM mode");
11944   }
11945 
11946   auto parseOne = [&]() -> bool {
11947     const MCExpr *Expr;
11948     if (getParser().parseExpression(Expr))
11949       return true;
11950     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
11951     if (!Value) {
11952       return Error(Loc, "expected constant expression");
11953     }
11954 
11955     char CurSuffix = Suffix;
11956     switch (Width) {
11957     case 2:
11958       if (Value->getValue() > 0xffff)
11959         return Error(Loc, "inst.n operand is too big, use inst.w instead");
11960       break;
11961     case 4:
11962       if (Value->getValue() > 0xffffffff)
11963         return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
11964                               " operand is too big");
11965       break;
11966     case 0:
11967       // Thumb mode, no width indicated. Guess from the opcode, if possible.
11968       if (Value->getValue() < 0xe800)
11969         CurSuffix = 'n';
11970       else if (Value->getValue() >= 0xe8000000)
11971         CurSuffix = 'w';
11972       else
11973         return Error(Loc, "cannot determine Thumb instruction size, "
11974                           "use inst.n/inst.w instead");
11975       break;
11976     default:
11977       llvm_unreachable("only supported widths are 2 and 4");
11978     }
11979 
11980     getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
11981     forwardITPosition();
11982     forwardVPTPosition();
11983     return false;
11984   };
11985 
11986   if (parseOptionalToken(AsmToken::EndOfStatement))
11987     return Error(Loc, "expected expression following directive");
11988   if (parseMany(parseOne))
11989     return true;
11990   return false;
11991 }
11992 
11993 /// parseDirectiveLtorg
11994 ///  ::= .ltorg | .pool
11995 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
11996   if (parseEOL())
11997     return true;
11998   getTargetStreamer().emitCurrentConstantPool();
11999   return false;
12000 }
12001 
12002 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
12003   const MCSection *Section = getStreamer().getCurrentSectionOnly();
12004 
12005   if (parseEOL())
12006     return true;
12007 
12008   if (!Section) {
12009     getStreamer().initSections(false, getSTI());
12010     Section = getStreamer().getCurrentSectionOnly();
12011   }
12012 
12013   assert(Section && "must have section to emit alignment");
12014   if (Section->useCodeAlign())
12015     getStreamer().emitCodeAlignment(Align(2), &getSTI());
12016   else
12017     getStreamer().emitValueToAlignment(Align(2));
12018 
12019   return false;
12020 }
12021 
12022 /// parseDirectivePersonalityIndex
12023 ///   ::= .personalityindex index
12024 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
12025   MCAsmParser &Parser = getParser();
12026   bool HasExistingPersonality = UC.hasPersonality();
12027 
12028   const MCExpr *IndexExpression;
12029   SMLoc IndexLoc = Parser.getTok().getLoc();
12030   if (Parser.parseExpression(IndexExpression) || parseEOL()) {
12031     return true;
12032   }
12033 
12034   UC.recordPersonalityIndex(L);
12035 
12036   if (!UC.hasFnStart()) {
12037     return Error(L, ".fnstart must precede .personalityindex directive");
12038   }
12039   if (UC.cantUnwind()) {
12040     Error(L, ".personalityindex cannot be used with .cantunwind");
12041     UC.emitCantUnwindLocNotes();
12042     return true;
12043   }
12044   if (UC.hasHandlerData()) {
12045     Error(L, ".personalityindex must precede .handlerdata directive");
12046     UC.emitHandlerDataLocNotes();
12047     return true;
12048   }
12049   if (HasExistingPersonality) {
12050     Error(L, "multiple personality directives");
12051     UC.emitPersonalityLocNotes();
12052     return true;
12053   }
12054 
12055   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
12056   if (!CE)
12057     return Error(IndexLoc, "index must be a constant number");
12058   if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
12059     return Error(IndexLoc,
12060                  "personality routine index should be in range [0-3]");
12061 
12062   getTargetStreamer().emitPersonalityIndex(CE->getValue());
12063   return false;
12064 }
12065 
12066 /// parseDirectiveUnwindRaw
12067 ///   ::= .unwind_raw offset, opcode [, opcode...]
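///
/// For example, the following emits the raw EHABI "finish" opcode (0xb0) with
/// no additional stack adjustment:
///   .unwind_raw 0, 0xb0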
12068 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
12069   MCAsmParser &Parser = getParser();
12070   int64_t StackOffset;
12071   const MCExpr *OffsetExpr;
12072   SMLoc OffsetLoc = getLexer().getLoc();
12073 
12074   if (!UC.hasFnStart())
12075     return Error(L, ".fnstart must precede .unwind_raw directives");
12076   if (getParser().parseExpression(OffsetExpr))
12077     return Error(OffsetLoc, "expected expression");
12078 
12079   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12080   if (!CE)
12081     return Error(OffsetLoc, "offset must be a constant");
12082 
12083   StackOffset = CE->getValue();
12084 
12085   if (Parser.parseComma())
12086     return true;
12087 
12088   SmallVector<uint8_t, 16> Opcodes;
12089 
12090   auto parseOne = [&]() -> bool {
12091     const MCExpr *OE = nullptr;
12092     SMLoc OpcodeLoc = getLexer().getLoc();
12093     if (check(getLexer().is(AsmToken::EndOfStatement) ||
12094                   Parser.parseExpression(OE),
12095               OpcodeLoc, "expected opcode expression"))
12096       return true;
12097     const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
12098     if (!OC)
12099       return Error(OpcodeLoc, "opcode value must be a constant");
12100     const int64_t Opcode = OC->getValue();
12101     if (Opcode & ~0xff)
12102       return Error(OpcodeLoc, "invalid opcode");
12103     Opcodes.push_back(uint8_t(Opcode));
12104     return false;
12105   };
12106 
12107   // Must have at least 1 element
12108   SMLoc OpcodeLoc = getLexer().getLoc();
12109   if (parseOptionalToken(AsmToken::EndOfStatement))
12110     return Error(OpcodeLoc, "expected opcode expression");
12111   if (parseMany(parseOne))
12112     return true;
12113 
12114   getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
12115   return false;
12116 }
12117 
12118 /// parseDirectiveTLSDescSeq
12119 ///   ::= .tlsdescseq tls-variable
12120 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12121   MCAsmParser &Parser = getParser();
12122 
12123   if (getLexer().isNot(AsmToken::Identifier))
12124     return TokError("expected variable after '.tlsdescseq' directive");
12125 
12126   const MCSymbolRefExpr *SRE =
12127     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
12128                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
12129   Lex();
12130 
12131   if (parseEOL())
12132     return true;
12133 
12134   getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12135   return false;
12136 }
12137 
12138 /// parseDirectiveMovSP
12139 ///  ::= .movsp reg [, #offset]
12140 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
12141   MCAsmParser &Parser = getParser();
12142   if (!UC.hasFnStart())
12143     return Error(L, ".fnstart must precede .movsp directives");
12144   if (UC.getFPReg() != ARM::SP)
12145     return Error(L, "unexpected .movsp directive");
12146 
12147   SMLoc SPRegLoc = Parser.getTok().getLoc();
12148   int SPReg = tryParseRegister();
12149   if (SPReg == -1)
12150     return Error(SPRegLoc, "register expected");
12151   if (SPReg == ARM::SP || SPReg == ARM::PC)
12152     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
12153 
12154   int64_t Offset = 0;
12155   if (Parser.parseOptionalToken(AsmToken::Comma)) {
12156     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
12157       return true;
12158 
12159     const MCExpr *OffsetExpr;
12160     SMLoc OffsetLoc = Parser.getTok().getLoc();
12161 
12162     if (Parser.parseExpression(OffsetExpr))
12163       return Error(OffsetLoc, "malformed offset expression");
12164 
12165     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12166     if (!CE)
12167       return Error(OffsetLoc, "offset must be an immediate constant");
12168 
12169     Offset = CE->getValue();
12170   }
12171 
12172   if (parseEOL())
12173     return true;
12174 
12175   getTargetStreamer().emitMovSP(SPReg, Offset);
12176   UC.saveFPReg(SPReg);
12177 
12178   return false;
12179 }
12180 
12181 /// parseDirectiveObjectArch
12182 ///   ::= .object_arch name
12183 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12184   MCAsmParser &Parser = getParser();
12185   if (getLexer().isNot(AsmToken::Identifier))
12186     return Error(getLexer().getLoc(), "unexpected token");
12187 
12188   StringRef Arch = Parser.getTok().getString();
12189   SMLoc ArchLoc = Parser.getTok().getLoc();
12190   Lex();
12191 
12192   ARM::ArchKind ID = ARM::parseArch(Arch);
12193 
12194   if (ID == ARM::ArchKind::INVALID)
12195     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
12196   if (parseToken(AsmToken::EndOfStatement))
12197     return true;
12198 
12199   getTargetStreamer().emitObjectArch(ID);
12200   return false;
12201 }
12202 
12203 /// parseDirectiveAlign
12204 ///   ::= .align
12205 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
12206   // NOTE: if this is not the end of the statement, fall back to the target
12207   // agnostic handling for this directive which will correctly handle this.
12208   if (parseOptionalToken(AsmToken::EndOfStatement)) {
12209     // '.align' is handled target-specifically here to mean 2**2-byte alignment.
12210     const MCSection *Section = getStreamer().getCurrentSectionOnly();
12211     assert(Section && "must have section to emit alignment");
12212     if (Section->useCodeAlign())
12213       getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
12214     else
12215       getStreamer().emitValueToAlignment(Align(4), 0, 1, 0);
12216     return false;
12217   }
12218   return true;
12219 }
12220 
12221 /// parseDirectiveThumbSet
12222 ///  ::= .thumb_set name, value
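///
/// Behaves like ".set" but additionally marks the resulting symbol as a Thumb
/// function, e.g. (symbol names are illustrative):
///   .thumb_set alias_func, impl_func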
12223 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
12224   MCAsmParser &Parser = getParser();
12225 
12226   StringRef Name;
12227   if (check(Parser.parseIdentifier(Name),
12228             "expected identifier after '.thumb_set'") ||
12229       Parser.parseComma())
12230     return true;
12231 
12232   MCSymbol *Sym;
12233   const MCExpr *Value;
12234   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
12235                                                Parser, Sym, Value))
12236     return true;
12237 
12238   getTargetStreamer().emitThumbSet(Sym, Value);
12239   return false;
12240 }
12241 
12242 /// parseDirectiveSEHAllocStack
12243 /// ::= .seh_stackalloc
12244 /// ::= .seh_stackalloc_w
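///
/// The operand is the number of bytes allocated in the prologue, e.g.:
///   .seh_stackalloc 32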
12245 bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12246   int64_t Size;
12247   if (parseImmExpr(Size))
12248     return true;
12249   getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12250   return false;
12251 }
12252 
12253 /// parseDirectiveSEHSaveRegs
12254 /// ::= .seh_save_regs
12255 /// ::= .seh_save_regs_w
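///
/// For example (the wide form is required if any of r8-r12 is included):
///   .seh_save_regs   {r4-r7, lr}
///   .seh_save_regs_w {r4-r11, lr}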
12256 bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
12257   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12258 
12259   if (parseRegisterList(Operands) || parseEOL())
12260     return true;
12261   ARMOperand &Op = (ARMOperand &)*Operands[0];
12262   if (!Op.isRegList())
12263     return Error(L, ".seh_save_regs{_w} expects GPR registers");
12264   const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12265   uint32_t Mask = 0;
12266   for (size_t i = 0; i < RegList.size(); ++i) {
12267     unsigned Reg = MRI->getEncodingValue(RegList[i]);
12268     if (Reg == 15) // pc -> lr
12269       Reg = 14;
12270     if (Reg == 13)
12271       return Error(L, ".seh_save_regs{_w} can't include SP");
12272     assert(Reg < 16U && "Register out of range");
12273     unsigned Bit = (1u << Reg);
12274     Mask |= Bit;
12275   }
12276   if (!Wide && (Mask & 0x1f00) != 0)
12277     return Error(L,
12278                  ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12279   getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12280   return false;
12281 }
12282 
12283 /// parseDirectiveSEHSaveSP
12284 /// ::= .seh_save_sp
12285 bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12286   int Reg = tryParseRegister();
12287   if (Reg == -1 || !MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12288     return Error(L, "expected GPR");
12289   unsigned Index = MRI->getEncodingValue(Reg);
12290   if (Index > 14 || Index == 13)
12291     return Error(L, "invalid register for .seh_save_sp");
12292   getTargetStreamer().emitARMWinCFISaveSP(Index);
12293   return false;
12294 }
12295 
12296 /// parseDirectiveSEHSaveFRegs
12297 /// ::= .seh_save_fregs
12298 bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
12299   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12300 
12301   if (parseRegisterList(Operands) || parseEOL())
12302     return true;
12303   ARMOperand &Op = (ARMOperand &)*Operands[0];
12304   if (!Op.isDPRRegList())
12305     return Error(L, ".seh_save_fregs expects DPR registers");
12306   const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12307   uint32_t Mask = 0;
12308   for (size_t i = 0; i < RegList.size(); ++i) {
12309     unsigned Reg = MRI->getEncodingValue(RegList[i]);
12310     assert(Reg < 32U && "Register out of range");
12311     unsigned Bit = (1u << Reg);
12312     Mask |= Bit;
12313   }
12314 
12315   if (Mask == 0)
12316     return Error(L, ".seh_save_fregs missing registers");
12317 
12318   unsigned First = 0;
12319   while ((Mask & 1) == 0) {
12320     First++;
12321     Mask >>= 1;
12322   }
12323   if (((Mask + 1) & Mask) != 0)
12324     return Error(L,
12325                  ".seh_save_fregs must take a contiguous range of registers");
12326   unsigned Last = First;
12327   while ((Mask & 2) != 0) {
12328     Last++;
12329     Mask >>= 1;
12330   }
12331   if (First < 16 && Last >= 16)
12332     return Error(L, ".seh_save_fregs must be all d0-d15 or d16-d31");
12333   getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
12334   return false;
12335 }
12336 
12337 /// parseDirectiveSEHSaveLR
12338 /// ::= .seh_save_lr
12339 bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12340   int64_t Offset;
12341   if (parseImmExpr(Offset))
12342     return true;
12343   getTargetStreamer().emitARMWinCFISaveLR(Offset);
12344   return false;
12345 }
12346 
12347 /// parseDirectiveSEHPrologEnd
12348 /// ::= .seh_endprologue
12349 /// ::= .seh_endprologue_fragment
12350 bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
12351   getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12352   return false;
12353 }
12354 
12355 /// parseDirectiveSEHNop
12356 /// ::= .seh_nop
12357 /// ::= .seh_nop_w
12358 bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
12359   getTargetStreamer().emitARMWinCFINop(Wide);
12360   return false;
12361 }
12362 
12363 /// parseDirectiveSEHEpilogStart
12364 /// ::= .seh_startepilogue
12365 /// ::= .seh_startepilogue_cond
12366 bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
12367   unsigned CC = ARMCC::AL;
12368   if (Condition) {
12369     MCAsmParser &Parser = getParser();
12370     SMLoc S = Parser.getTok().getLoc();
12371     const AsmToken &Tok = Parser.getTok();
12372     if (!Tok.is(AsmToken::Identifier))
12373       return Error(S, ".seh_startepilogue_cond missing condition");
12374     CC = ARMCondCodeFromString(Tok.getString());
12375     if (CC == ~0U)
12376       return Error(S, "invalid condition");
12377     Parser.Lex(); // Eat the token.
12378   }
12379 
12380   getTargetStreamer().emitARMWinCFIEpilogStart(CC);
12381   return false;
12382 }
12383 
12384 /// parseDirectiveSEHEpilogEnd
12385 /// ::= .seh_endepilogue
12386 bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
12387   getTargetStreamer().emitARMWinCFIEpilogEnd();
12388   return false;
12389 }
12390 
12391 /// parseDirectiveSEHCustom
12392 /// ::= .seh_custom
12393 bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12394   unsigned Opcode = 0;
12395   do {
12396     int64_t Byte;
12397     if (parseImmExpr(Byte))
12398       return true;
12399     if (Byte > 0xff || Byte < 0)
12400       return Error(L, "Invalid byte value in .seh_custom");
12401     if (Opcode > 0x00ffffff)
12402       return Error(L, "Too many bytes in .seh_custom");
12403     // Store the bytes as one big-endian number in Opcode. In a multi-byte
12404     // opcode sequence, the first byte can't be zero.
12405     Opcode = (Opcode << 8) | Byte;
12406   } while (parseOptionalToken(AsmToken::Comma));
12407   getTargetStreamer().emitARMWinCFICustom(Opcode);
12408   return false;
12409 }
12410 
12411 /// Force static initialization.
12412 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser() {
12413   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
12414   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
12415   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
12416   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
12417 }
12418 
12419 #define GET_REGISTER_MATCHER
12420 #define GET_SUBTARGET_FEATURE_NAME
12421 #define GET_MATCHER_IMPLEMENTATION
12422 #define GET_MNEMONIC_SPELL_CHECKER
12423 #include "ARMGenAsmMatcher.inc"
12424 
12425 // Some diagnostics need to vary with subtarget features, so they are handled
12426 // here. For example, the DPR class has either 16 or 32 registers, depending
12427 // on the FPU available.
12428 const char *
12429 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12430   switch (MatchError) {
12431   // rGPR contains sp starting with ARMv8.
12432   case Match_rGPR:
12433     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12434                       : "operand must be a register in range [r0, r12] or r14";
12435   // DPR contains 16 registers for some FPUs, and 32 for others.
12436   case Match_DPR:
12437     return hasD32() ? "operand must be a register in range [d0, d31]"
12438                     : "operand must be a register in range [d0, d15]";
12439   case Match_DPR_RegList:
12440     return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12441                     : "operand must be a list of registers in range [d0, d15]";
12442 
12443   // For all other diags, use the static string from tablegen.
12444   default:
12445     return getMatchKindDiag(MatchError);
12446   }
12447 }
12448 
12449 // Process the list of near-misses, throwing away ones we don't want to report
12450 // to the user, and converting the rest to a source location and string that
12451 // should be reported.
12452 void
12453 ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
12454                                SmallVectorImpl<NearMissMessage> &NearMissesOut,
12455                                SMLoc IDLoc, OperandVector &Operands) {
12456   // TODO: If operand didn't match, sub in a dummy one and run target
12457   // predicate, so that we can avoid reporting near-misses that are invalid?
12458   // TODO: Many operand types don't have SuperClasses set, so we report
12459   // redundant ones.
12460   // TODO: Some operands are superclasses of registers (e.g.
12461   // MCK_RegShiftedImm), we don't have any way to represent that currently.
12462   // TODO: This is not all ARM-specific, can some of it be factored out?
12463 
12464   // Record some information about near-misses that we have already seen, so
12465   // that we can avoid reporting redundant ones. For example, if there are
12466   // variants of an instruction that take 8- and 16-bit immediates, we want
12467   // to only report the widest one.
12468   std::multimap<unsigned, unsigned> OperandMissesSeen;
12469   SmallSet<FeatureBitset, 4> FeatureMissesSeen;
12470   bool ReportedTooFewOperands = false;
12471 
12472   // Process the near-misses in reverse order, so that we see more general ones
12473   // first, and so can avoid emitting more specific ones.
12474   for (NearMissInfo &I : reverse(NearMissesIn)) {
12475     switch (I.getKind()) {
12476     case NearMissInfo::NearMissOperand: {
12477       SMLoc OperandLoc =
12478           ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
12479       const char *OperandDiag =
12480           getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
12481 
12482       // If we have already emitted a message for a superclass, don't also report
12483       // the sub-class. We consider all operand classes that we don't have a
12484   // specialised diagnostic for to be equal for the purpose of this check,
12485       // so that we don't report the generic error multiple times on the same
12486       // operand.
12487       unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
12488       auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
12489       if (std::any_of(PrevReports.first, PrevReports.second,
12490                       [DupCheckMatchClass](
12491                           const std::pair<unsigned, unsigned> Pair) {
12492             if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12493               return Pair.second == DupCheckMatchClass;
12494             else
12495               return isSubclass((MatchClassKind)DupCheckMatchClass,
12496                                 (MatchClassKind)Pair.second);
12497           }))
12498         break;
12499       OperandMissesSeen.insert(
12500           std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
12501 
12502       NearMissMessage Message;
12503       Message.Loc = OperandLoc;
12504       if (OperandDiag) {
12505         Message.Message = OperandDiag;
12506       } else if (I.getOperandClass() == InvalidMatchClass) {
12507         Message.Message = "too many operands for instruction";
12508       } else {
12509         Message.Message = "invalid operand for instruction";
12510         LLVM_DEBUG(
12511             dbgs() << "Missing diagnostic string for operand class "
12512                    << getMatchClassName((MatchClassKind)I.getOperandClass())
12513                    << I.getOperandClass() << ", error " << I.getOperandError()
12514                    << ", opcode " << MII.getName(I.getOpcode()) << "\n");
12515       }
12516       NearMissesOut.emplace_back(Message);
12517       break;
12518     }
12519     case NearMissInfo::NearMissFeature: {
12520       const FeatureBitset &MissingFeatures = I.getFeatures();
12521       // Don't report the same set of features twice.
12522       if (FeatureMissesSeen.count(MissingFeatures))
12523         break;
12524       FeatureMissesSeen.insert(MissingFeatures);
12525 
12526       // Special case: don't report a feature set which includes arm-mode for
12527       // targets that don't have ARM mode.
12528       if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
12529         break;
12530       // Don't report any near-misses that both require switching instruction
12531       // set, and adding other subtarget features.
12532       if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
12533           MissingFeatures.count() > 1)
12534         break;
12535       if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
12536           MissingFeatures.count() > 1)
12537         break;
12538       if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
12539           (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
12540                                              Feature_IsThumbBit})).any())
12541         break;
12542       if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
12543         break;
12544 
12545       NearMissMessage Message;
12546       Message.Loc = IDLoc;
12547       raw_svector_ostream OS(Message.Message);
12548 
12549       OS << "instruction requires:";
12550       for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
12551         if (MissingFeatures.test(i))
12552           OS << ' ' << getSubtargetFeatureName(i);
12553 
12554       NearMissesOut.emplace_back(Message);
12555 
12556       break;
12557     }
12558     case NearMissInfo::NearMissPredicate: {
12559       NearMissMessage Message;
12560       Message.Loc = IDLoc;
12561       switch (I.getPredicateError()) {
12562       case Match_RequiresNotITBlock:
12563         Message.Message = "flag setting instruction only valid outside IT block";
12564         break;
12565       case Match_RequiresITBlock:
12566         Message.Message = "instruction only valid inside IT block";
12567         break;
12568       case Match_RequiresV6:
12569         Message.Message = "instruction variant requires ARMv6 or later";
12570         break;
12571       case Match_RequiresThumb2:
12572         Message.Message = "instruction variant requires Thumb2";
12573         break;
12574       case Match_RequiresV8:
12575         Message.Message = "instruction variant requires ARMv8 or later";
12576         break;
12577       case Match_RequiresFlagSetting:
12578         Message.Message = "no flag-preserving variant of this instruction available";
12579         break;
12580       case Match_InvalidOperand:
12581         Message.Message = "invalid operand for instruction";
12582         break;
12583       default:
12584         llvm_unreachable("Unhandled target predicate error");
12585         break;
12586       }
12587       NearMissesOut.emplace_back(Message);
12588       break;
12589     }
12590     case NearMissInfo::NearMissTooFewOperands: {
12591       if (!ReportedTooFewOperands) {
12592         SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
12593         NearMissesOut.emplace_back(NearMissMessage{
12594             EndLoc, StringRef("too few operands for instruction")});
12595         ReportedTooFewOperands = true;
12596       }
12597       break;
12598     }
12599     case NearMissInfo::NoNearMiss:
12600       // This should never leave the matcher.
12601       llvm_unreachable("not a near-miss");
12602       break;
12603     }
12604   }
12605 }
12606 
12607 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12608                                     SMLoc IDLoc, OperandVector &Operands) {
12609   SmallVector<NearMissMessage, 4> Messages;
12610   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12611 
12612   if (Messages.size() == 0) {
12613     // No near-misses were found, so the best we can do is "invalid
12614     // instruction".
12615     Error(IDLoc, "invalid instruction");
12616   } else if (Messages.size() == 1) {
12617     // One near miss was found, report it as the sole error.
12618     Error(Messages[0].Loc, Messages[0].Message);
12619   } else {
12620     // More than one near miss, so report a generic "invalid instruction"
12621     // error, followed by notes for each of the near-misses.
12622     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
12623     for (auto &M : Messages) {
12624       Note(M.Loc, M.Message);
12625     }
12626   }
12627 }
12628 
12629 bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
12630   // FIXME: This structure should be moved inside ARMTargetParser
12631   // when we start to table-generate them, and we can use the ARM
12632   // flags below, that were generated by table-gen.
12633   static const struct {
12634     const uint64_t Kind;
12635     const FeatureBitset ArchCheck;
12636     const FeatureBitset Features;
12637   } Extensions[] = {
12638       {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12639       {ARM::AEK_AES,
12640        {Feature_HasV8Bit},
12641        {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12642       {ARM::AEK_SHA2,
12643        {Feature_HasV8Bit},
12644        {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12645       {ARM::AEK_CRYPTO,
12646        {Feature_HasV8Bit},
12647        {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12648       {(ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP),
12649        {Feature_HasV8_1MMainlineBit},
12650        {ARM::HasMVEFloatOps}},
12651       {ARM::AEK_FP,
12652        {Feature_HasV8Bit},
12653        {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12654       {(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
12655        {Feature_HasV7Bit, Feature_IsNotMClassBit},
12656        {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12657       {ARM::AEK_MP,
12658        {Feature_HasV7Bit, Feature_IsNotMClassBit},
12659        {ARM::FeatureMP}},
12660       {ARM::AEK_SIMD,
12661        {Feature_HasV8Bit},
12662        {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12663       {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12664       // FIXME: Only available in A-class, isel not predicated
12665       {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12666       {ARM::AEK_FP16,
12667        {Feature_HasV8_2aBit},
12668        {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12669       {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12670       {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12671       {ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12672       // FIXME: Unsupported extensions.
12673       {ARM::AEK_OS, {}, {}},
12674       {ARM::AEK_IWMMXT, {}, {}},
12675       {ARM::AEK_IWMMXT2, {}, {}},
12676       {ARM::AEK_MAVERICK, {}, {}},
12677       {ARM::AEK_XSCALE, {}, {}},
12678   };
12679   bool EnableFeature = !Name.consume_front_insensitive("no");
12680   uint64_t FeatureKind = ARM::parseArchExt(Name);
12681   if (FeatureKind == ARM::AEK_INVALID)
12682     return Error(ExtLoc, "unknown architectural extension: " + Name);
12683 
12684   for (const auto &Extension : Extensions) {
12685     if (Extension.Kind != FeatureKind)
12686       continue;
12687 
12688     if (Extension.Features.none())
12689       return Error(ExtLoc, "unsupported architectural extension: " + Name);
12690 
12691     if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
12692       return Error(ExtLoc, "architectural extension '" + Name +
12693                                "' is not "
12694                                "allowed for the current base architecture");
12695 
12696     MCSubtargetInfo &STI = copySTI();
12697     if (EnableFeature) {
12698       STI.SetFeatureBitsTransitively(Extension.Features);
12699     } else {
12700       STI.ClearFeatureBitsTransitively(Extension.Features);
12701     }
12702     FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
12703     setAvailableFeatures(Features);
12704     return true;
12705   }
12706   return false;
12707 }
12708 
12709 /// parseDirectiveArchExtension
12710 ///   ::= .arch_extension [no]feature
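///
/// For example:
///   .arch_extension crc
///   .arch_extension nocrypto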
12711 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
12712 
12713   MCAsmParser &Parser = getParser();
12714 
12715   if (getLexer().isNot(AsmToken::Identifier))
12716     return Error(getLexer().getLoc(), "expected architecture extension name");
12717 
12718   StringRef Name = Parser.getTok().getString();
12719   SMLoc ExtLoc = Parser.getTok().getLoc();
12720   Lex();
12721 
12722   if (parseEOL())
12723     return true;
12724 
12725   if (Name == "nocrypto") {
12726     enableArchExtFeature("nosha2", ExtLoc);
12727     enableArchExtFeature("noaes", ExtLoc);
12728   }
12729 
12730   if (enableArchExtFeature(Name, ExtLoc))
12731     return false;
12732 
12733   return Error(ExtLoc, "unknown architectural extension: " + Name);
12734 }
12735 
12736 // Define this matcher function after the auto-generated include so we
12737 // have the match class enum definitions.
12738 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
12739                                                   unsigned Kind) {
12740   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
12741   // If the kind is a token for a literal immediate, check if our asm
12742   // operand matches. This is for InstAliases which have a fixed-value
12743   // immediate in the syntax.
12744   switch (Kind) {
12745   default: break;
12746   case MCK__HASH_0:
12747     if (Op.isImm())
12748       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12749         if (CE->getValue() == 0)
12750           return Match_Success;
12751     break;
12752   case MCK__HASH_8:
12753     if (Op.isImm())
12754       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12755         if (CE->getValue() == 8)
12756           return Match_Success;
12757     break;
12758   case MCK__HASH_16:
12759     if (Op.isImm())
12760       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12761         if (CE->getValue() == 16)
12762           return Match_Success;
12763     break;
12764   case MCK_ModImm:
12765     if (Op.isImm()) {
12766       const MCExpr *SOExpr = Op.getImm();
12767       int64_t Value;
12768       if (!SOExpr->evaluateAsAbsolute(Value))
12769         return Match_Success;
12770       assert((Value >= std::numeric_limits<int32_t>::min() &&
12771               Value <= std::numeric_limits<uint32_t>::max()) &&
12772              "expression value must be representable in 32 bits");
12773     }
12774     break;
12775   case MCK_rGPR:
12776     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
12777       return Match_Success;
12778     return Match_rGPR;
12779   case MCK_GPRPair:
12780     if (Op.isReg() &&
12781         MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
12782       return Match_Success;
12783     break;
12784   }
12785   return Match_InvalidOperand;
12786 }
12787 
12788 bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
12789                                            StringRef ExtraToken) {
12790   if (!hasMVE())
12791     return false;
12792 
12793   if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
12794       (Mnemonic.starts_with("vldrh") && Mnemonic != "vldrhi") ||
12795       (Mnemonic.starts_with("vmov") &&
12796        !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
12797          ExtraToken == ".8")) ||
12798       (Mnemonic.starts_with("vrint") && Mnemonic != "vrintr") ||
12799       (Mnemonic.starts_with("vstrh") && Mnemonic != "vstrhi"))
12800     return true;
12801 
12802   const char *predicable_prefixes[] = {
12803       "vabav",      "vabd",     "vabs",      "vadc",       "vadd",
12804       "vaddlv",     "vaddv",    "vand",      "vbic",       "vbrsr",
12805       "vcadd",      "vcls",     "vclz",      "vcmla",      "vcmp",
12806       "vcmul",      "vctp",     "vcvt",      "vddup",      "vdup",
12807       "vdwdup",     "veor",     "vfma",      "vfmas",      "vfms",
12808       "vhadd",      "vhcadd",   "vhsub",     "vidup",      "viwdup",
12809       "vldrb",      "vldrd",    "vldrw",     "vmax",       "vmaxa",
12810       "vmaxav",     "vmaxnm",   "vmaxnma",   "vmaxnmav",   "vmaxnmv",
12811       "vmaxv",      "vmin",     "vminav",    "vminnm",     "vminnmav",
12812       "vminnmv",    "vminv",    "vmla",      "vmladav",    "vmlaldav",
12813       "vmlalv",     "vmlas",    "vmlav",     "vmlsdav",    "vmlsldav",
12814       "vmovlb",     "vmovlt",   "vmovnb",    "vmovnt",     "vmul",
12815       "vmvn",       "vneg",     "vorn",      "vorr",       "vpnot",
12816       "vpsel",      "vqabs",    "vqadd",     "vqdmladh",   "vqdmlah",
12817       "vqdmlash",   "vqdmlsdh", "vqdmulh",   "vqdmull",    "vqmovn",
12818       "vqmovun",    "vqneg",    "vqrdmladh", "vqrdmlah",   "vqrdmlash",
12819       "vqrdmlsdh",  "vqrdmulh", "vqrshl",    "vqrshrn",    "vqrshrun",
12820       "vqshl",      "vqshrn",   "vqshrun",   "vqsub",      "vrev16",
12821       "vrev32",     "vrev64",   "vrhadd",    "vrmlaldavh", "vrmlalvh",
12822       "vrmlsldavh", "vrmulh",   "vrshl",     "vrshr",      "vrshrn",
12823       "vsbc",       "vshl",     "vshlc",     "vshll",      "vshr",
12824       "vshrn",      "vsli",     "vsri",      "vstrb",      "vstrd",
12825       "vstrw",      "vsub"};
12826 
12827   return std::any_of(
12828       std::begin(predicable_prefixes), std::end(predicable_prefixes),
12829       [&Mnemonic](const char *prefix) { return Mnemonic.starts_with(prefix); });
12830 }
12831