1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMBaseInstrInfo.h"
10 #include "ARMFeatures.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMInstPrinter.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "TargetInfo/ARMTargetInfo.h"
17 #include "Utils/ARMBaseInfo.h"
18 #include "llvm/ADT/APFloat.h"
19 #include "llvm/ADT/APInt.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringMap.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/StringSet.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCInstrDesc.h"
32 #include "llvm/MC/MCInstrInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
37 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
39 #include "llvm/MC/MCRegisterInfo.h"
40 #include "llvm/MC/MCSection.h"
41 #include "llvm/MC/MCStreamer.h"
42 #include "llvm/MC/MCSubtargetInfo.h"
43 #include "llvm/MC/MCSymbol.h"
44 #include "llvm/MC/TargetRegistry.h"
45 #include "llvm/Support/ARMBuildAttributes.h"
46 #include "llvm/Support/ARMEHABI.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/MathExtras.h"
52 #include "llvm/Support/SMLoc.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/TargetParser/SubtargetFeature.h"
55 #include "llvm/TargetParser/TargetParser.h"
56 #include "llvm/TargetParser/Triple.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstddef>
60 #include <cstdint>
61 #include <iterator>
62 #include <limits>
63 #include <memory>
64 #include <string>
65 #include <utility>
66 #include <vector>
67 
68 #define DEBUG_TYPE "asm-parser"
69 
70 using namespace llvm;
71 
72 namespace llvm {
73 struct ARMInstrTable {
74   MCInstrDesc Insts[4445];
75   MCOperandInfo OperandInfo[3026];
76   MCPhysReg ImplicitOps[130];
77 };
78 extern const ARMInstrTable ARMDescs;
79 } // end namespace llvm
80 
81 namespace {
82 
83 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
84 
85 static cl::opt<ImplicitItModeTy> ImplicitItMode(
86     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
87     cl::desc("Allow conditional instructions outdside of an IT block"),
88     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
89                           "Accept in both ISAs, emit implicit ITs in Thumb"),
90                clEnumValN(ImplicitItModeTy::Never, "never",
91                           "Warn in ARM, reject in Thumb"),
92                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
93                           "Accept in ARM, reject in Thumb"),
94                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
95                           "Warn in ARM, emit implicit ITs in Thumb")));
96 
97 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
98                                         cl::init(false));
99 
100 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
101 
102 static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
103   // Position==0 means we're not in an IT block at all. Position==1
104   // means we want the first state bit, which is always 0 (Then).
105   // Position==2 means we want the second state bit, stored at bit 3
106   // of Mask, and so on downwards. So (5 - Position) will shift the
107   // right bit down to bit 0, including the always-0 bit at bit 4 for
108   // the mandatory initial Then.
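  // For example, an explicit "ITET EQ" has Mask == 0b1010 in this format
  // (the trailing 1 at bit 1 only marks the block length), so
  // extractITMaskBit(0b1010, 2) == 1 selects the 'else' slot and
  // extractITMaskBit(0b1010, 3) == 0 selects a 'then' slot.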
109   return (Mask >> (5 - Position) & 1);
110 }
111 
112 class UnwindContext {
113   using Locs = SmallVector<SMLoc, 4>;
114 
115   MCAsmParser &Parser;
116   Locs FnStartLocs;
117   Locs CantUnwindLocs;
118   Locs PersonalityLocs;
119   Locs PersonalityIndexLocs;
120   Locs HandlerDataLocs;
121   int FPReg;
122 
123 public:
124   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
125 
126   bool hasFnStart() const { return !FnStartLocs.empty(); }
127   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
128   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
129 
130   bool hasPersonality() const {
131     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
132   }
133 
134   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
135   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
136   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
137   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
138   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
139 
140   void saveFPReg(int Reg) { FPReg = Reg; }
141   int getFPReg() const { return FPReg; }
142 
143   void emitFnStartLocNotes() const {
144     for (const SMLoc &Loc : FnStartLocs)
145       Parser.Note(Loc, ".fnstart was specified here");
146   }
147 
148   void emitCantUnwindLocNotes() const {
149     for (const SMLoc &Loc : CantUnwindLocs)
150       Parser.Note(Loc, ".cantunwind was specified here");
151   }
152 
153   void emitHandlerDataLocNotes() const {
154     for (const SMLoc &Loc : HandlerDataLocs)
155       Parser.Note(Loc, ".handlerdata was specified here");
156   }
157 
158   void emitPersonalityLocNotes() const {
159     for (Locs::const_iterator PI = PersonalityLocs.begin(),
160                               PE = PersonalityLocs.end(),
161                               PII = PersonalityIndexLocs.begin(),
162                               PIE = PersonalityIndexLocs.end();
163          PI != PE || PII != PIE;) {
164       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
165         Parser.Note(*PI++, ".personality was specified here");
166       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
167         Parser.Note(*PII++, ".personalityindex was specified here");
168       else
169         llvm_unreachable(".personality and .personalityindex cannot be "
170                          "at the same location");
171     }
172   }
173 
174   void reset() {
175     FnStartLocs = Locs();
176     CantUnwindLocs = Locs();
177     PersonalityLocs = Locs();
178     HandlerDataLocs = Locs();
179     PersonalityIndexLocs = Locs();
180     FPReg = ARM::SP;
181   }
182 };
183 
184 // Various sets of ARM instruction mnemonics which are used by the asm parser
185 class ARMMnemonicSets {
186   StringSet<> CDE;
187   StringSet<> CDEWithVPTSuffix;
188 public:
189   ARMMnemonicSets(const MCSubtargetInfo &STI);
190 
191   /// Returns true iff a given mnemonic is a CDE instruction
192   bool isCDEInstr(StringRef Mnemonic) {
193     // Quick check before searching the set
194     if (!Mnemonic.startswith("cx") && !Mnemonic.startswith("vcx"))
195       return false;
196     return CDE.count(Mnemonic);
197   }
198 
199   /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
200   /// (possibly with a predication suffix "e" or "t")
201   bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
202     if (!Mnemonic.startswith("vcx"))
203       return false;
204     return CDEWithVPTSuffix.count(Mnemonic);
205   }
206 
207   /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
208   /// (possibly with a condition suffix)
209   bool isITPredicableCDEInstr(StringRef Mnemonic) {
210     if (!Mnemonic.startswith("cx"))
211       return false;
212     return Mnemonic.startswith("cx1a") || Mnemonic.startswith("cx1da") ||
213            Mnemonic.startswith("cx2a") || Mnemonic.startswith("cx2da") ||
214            Mnemonic.startswith("cx3a") || Mnemonic.startswith("cx3da");
215   }
216 
217   /// Return true iff a given mnemonic is an integer CDE instruction with
218   /// dual-register destination
219   bool isCDEDualRegInstr(StringRef Mnemonic) {
220     if (!Mnemonic.startswith("cx"))
221       return false;
222     return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
223            Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
224            Mnemonic == "cx3d" || Mnemonic == "cx3da";
225   }
226 };
227 
228 ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
229   for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
230                              "cx2", "cx2a", "cx2d", "cx2da",
231                              "cx3", "cx3a", "cx3d", "cx3da", })
232     CDE.insert(Mnemonic);
233   for (StringRef Mnemonic :
234        {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
235     CDE.insert(Mnemonic);
236     CDEWithVPTSuffix.insert(Mnemonic);
237     CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
238     CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
239   }
240 }
241 
242 class ARMAsmParser : public MCTargetAsmParser {
243   const MCRegisterInfo *MRI;
244   UnwindContext UC;
245   ARMMnemonicSets MS;
246 
247   ARMTargetStreamer &getTargetStreamer() {
248     assert(getParser().getStreamer().getTargetStreamer() &&
249            "do not have a target streamer");
250     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
251     return static_cast<ARMTargetStreamer &>(TS);
252   }
253 
  // Map of register aliases registered via the .req directive.
255   StringMap<unsigned> RegisterReqs;
256 
257   bool NextSymbolIsThumb;
258 
259   bool useImplicitITThumb() const {
260     return ImplicitItMode == ImplicitItModeTy::Always ||
261            ImplicitItMode == ImplicitItModeTy::ThumbOnly;
262   }
263 
264   bool useImplicitITARM() const {
265     return ImplicitItMode == ImplicitItModeTy::Always ||
266            ImplicitItMode == ImplicitItModeTy::ARMOnly;
267   }
268 
269   struct {
270     ARMCC::CondCodes Cond;    // Condition for IT block.
271     unsigned Mask:4;          // Condition mask for instructions.
272                               // Starting at first 1 (from lsb).
                              //   '0'  condition as indicated in IT (then).
                              //   '1'  inverse of condition (else).
275                               // Count of instructions in IT block is
276                               // 4 - trailingzeroes(mask)
277                               // Note that this does not have the same encoding
278                               // as in the IT instruction, which also depends
279                               // on the low bit of the condition code.
280 
281     unsigned CurPosition;     // Current position in parsing of IT
282                               // block. In range [0,4], with 0 being the IT
283                               // instruction itself. Initialized according to
284                               // count of instructions in block.  ~0U if no
285                               // active IT block.
286 
287     bool IsExplicit;          // true  - The IT instruction was present in the
288                               //         input, we should not modify it.
289                               // false - The IT instruction was added
290                               //         implicitly, we can extend it if that
291                               //         would be legal.
292   } ITState;
293 
294   SmallVector<MCInst, 4> PendingConditionalInsts;
295 
296   void flushPendingInstructions(MCStreamer &Out) override {
297     if (!inImplicitITBlock()) {
298       assert(PendingConditionalInsts.size() == 0);
299       return;
300     }
301 
302     // Emit the IT instruction
303     MCInst ITInst;
304     ITInst.setOpcode(ARM::t2IT);
305     ITInst.addOperand(MCOperand::createImm(ITState.Cond));
306     ITInst.addOperand(MCOperand::createImm(ITState.Mask));
307     Out.emitInstruction(ITInst, getSTI());
308 
309     // Emit the conditional instructions
310     assert(PendingConditionalInsts.size() <= 4);
311     for (const MCInst &Inst : PendingConditionalInsts) {
312       Out.emitInstruction(Inst, getSTI());
313     }
314     PendingConditionalInsts.clear();
315 
316     // Clear the IT state
317     ITState.Mask = 0;
318     ITState.CurPosition = ~0U;
319   }
320 
321   bool inITBlock() { return ITState.CurPosition != ~0U; }
322   bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
323   bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
324 
325   bool lastInITBlock() {
326     return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
327   }
328 
329   void forwardITPosition() {
330     if (!inITBlock()) return;
331     // Move to the next instruction in the IT block, if there is one. If not,
332     // mark the block as done, except for implicit IT blocks, which we leave
333     // open until we find an instruction that can't be added to it.
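    // For example, in an explicit three-slot block (Mask trailing zeros == 1)
    // the predicated instructions occupy positions 1..3; incrementing past
    // position 3 gives 4 == 5 - TZ, which closes the block.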
334     unsigned TZ = llvm::countr_zero(ITState.Mask);
335     if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
336       ITState.CurPosition = ~0U; // Done with the IT block after this.
337   }
338 
339   // Rewind the state of the current IT block, removing the last slot from it.
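  // For example, rewinding a two-slot block (Mask == 0b0100 or 0b1100) yields
  // Mask == 0b1000, a one-slot block again.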
340   void rewindImplicitITPosition() {
341     assert(inImplicitITBlock());
342     assert(ITState.CurPosition > 1);
343     ITState.CurPosition--;
344     unsigned TZ = llvm::countr_zero(ITState.Mask);
345     unsigned NewMask = 0;
346     NewMask |= ITState.Mask & (0xC << TZ);
347     NewMask |= 0x2 << TZ;
348     ITState.Mask = NewMask;
349   }
350 
  // Discard the current implicit IT block. Only valid while we are still at
  // the first slot, so this always closes the block.
353   void discardImplicitITBlock() {
354     assert(inImplicitITBlock());
355     assert(ITState.CurPosition == 1);
356     ITState.CurPosition = ~0U;
357   }
358 
359   // Return the low-subreg of a given Q register.
360   unsigned getDRegFromQReg(unsigned QReg) const {
361     return MRI->getSubReg(QReg, ARM::dsub_0);
362   }
363 
364   // Get the condition code corresponding to the current IT block slot.
365   ARMCC::CondCodes currentITCond() {
366     unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
367     return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
368   }
369 
370   // Invert the condition of the current IT block slot without changing any
371   // other slots in the same block.
372   void invertCurrentITCondition() {
373     if (ITState.CurPosition == 1) {
374       ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
375     } else {
376       ITState.Mask ^= 1 << (5 - ITState.CurPosition);
377     }
378   }
379 
380   // Returns true if the current IT block is full (all 4 slots used).
381   bool isITBlockFull() {
382     return inITBlock() && (ITState.Mask & 1);
383   }
384 
385   // Extend the current implicit IT block to have one more slot with the given
386   // condition code.
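  // For example, extending a one-slot implicit block (Mask == 0b1000) with
  // the opposite condition gives Mask == 0b1100 (second slot 'e'); extending
  // with the same condition gives Mask == 0b0100 (second slot 't').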
387   void extendImplicitITBlock(ARMCC::CondCodes Cond) {
388     assert(inImplicitITBlock());
389     assert(!isITBlockFull());
390     assert(Cond == ITState.Cond ||
391            Cond == ARMCC::getOppositeCondition(ITState.Cond));
392     unsigned TZ = llvm::countr_zero(ITState.Mask);
393     unsigned NewMask = 0;
394     // Keep any existing condition bits.
395     NewMask |= ITState.Mask & (0xE << TZ);
396     // Insert the new condition bit.
397     NewMask |= (Cond != ITState.Cond) << TZ;
398     // Move the trailing 1 down one bit.
399     NewMask |= 1 << (TZ - 1);
400     ITState.Mask = NewMask;
401   }
402 
403   // Create a new implicit IT block with a dummy condition code.
404   void startImplicitITBlock() {
405     assert(!inITBlock());
406     ITState.Cond = ARMCC::AL;
407     ITState.Mask = 8;
408     ITState.CurPosition = 1;
409     ITState.IsExplicit = false;
410   }
411 
412   // Create a new explicit IT block with the given condition and mask.
413   // The mask should be in the format used in ARMOperand and
414   // MCOperand, with a 1 implying 'e', regardless of the low bit of
415   // the condition.
416   void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
417     assert(!inITBlock());
418     ITState.Cond = Cond;
419     ITState.Mask = Mask;
420     ITState.CurPosition = 0;
421     ITState.IsExplicit = true;
422   }
423 
424   struct {
425     unsigned Mask : 4;
426     unsigned CurPosition;
427   } VPTState;
428   bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
429   void forwardVPTPosition() {
430     if (!inVPTBlock()) return;
431     unsigned TZ = llvm::countr_zero(VPTState.Mask);
432     if (++VPTState.CurPosition == 5 - TZ)
433       VPTState.CurPosition = ~0U;
434   }
435 
436   void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
437     return getParser().Note(L, Msg, Range);
438   }
439 
440   bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
441     return getParser().Warning(L, Msg, Range);
442   }
443 
444   bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
445     return getParser().Error(L, Msg, Range);
446   }
447 
448   bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
449                            unsigned ListNo, bool IsARPop = false);
450   bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
451                            unsigned ListNo);
452 
453   int tryParseRegister();
454   bool tryParseRegisterWithWriteBack(OperandVector &);
455   int tryParseShiftRegister(OperandVector &);
456   bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
457                          bool AllowRAAC = false);
458   bool parseMemory(OperandVector &);
459   bool parseOperand(OperandVector &, StringRef Mnemonic);
460   bool parseImmExpr(int64_t &Out);
461   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
462   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
463                               unsigned &ShiftAmount);
464   bool parseLiteralValues(unsigned Size, SMLoc L);
465   bool parseDirectiveThumb(SMLoc L);
466   bool parseDirectiveARM(SMLoc L);
467   bool parseDirectiveThumbFunc(SMLoc L);
468   bool parseDirectiveCode(SMLoc L);
469   bool parseDirectiveSyntax(SMLoc L);
470   bool parseDirectiveReq(StringRef Name, SMLoc L);
471   bool parseDirectiveUnreq(SMLoc L);
472   bool parseDirectiveArch(SMLoc L);
473   bool parseDirectiveEabiAttr(SMLoc L);
474   bool parseDirectiveCPU(SMLoc L);
475   bool parseDirectiveFPU(SMLoc L);
476   bool parseDirectiveFnStart(SMLoc L);
477   bool parseDirectiveFnEnd(SMLoc L);
478   bool parseDirectiveCantUnwind(SMLoc L);
479   bool parseDirectivePersonality(SMLoc L);
480   bool parseDirectiveHandlerData(SMLoc L);
481   bool parseDirectiveSetFP(SMLoc L);
482   bool parseDirectivePad(SMLoc L);
483   bool parseDirectiveRegSave(SMLoc L, bool IsVector);
484   bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
485   bool parseDirectiveLtorg(SMLoc L);
486   bool parseDirectiveEven(SMLoc L);
487   bool parseDirectivePersonalityIndex(SMLoc L);
488   bool parseDirectiveUnwindRaw(SMLoc L);
489   bool parseDirectiveTLSDescSeq(SMLoc L);
490   bool parseDirectiveMovSP(SMLoc L);
491   bool parseDirectiveObjectArch(SMLoc L);
492   bool parseDirectiveArchExtension(SMLoc L);
493   bool parseDirectiveAlign(SMLoc L);
494   bool parseDirectiveThumbSet(SMLoc L);
495 
496   bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
497   bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
498   bool parseDirectiveSEHSaveSP(SMLoc L);
499   bool parseDirectiveSEHSaveFRegs(SMLoc L);
500   bool parseDirectiveSEHSaveLR(SMLoc L);
501   bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
502   bool parseDirectiveSEHNop(SMLoc L, bool Wide);
503   bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
504   bool parseDirectiveSEHEpilogEnd(SMLoc L);
505   bool parseDirectiveSEHCustom(SMLoc L);
506 
507   bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
508   StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
509                           unsigned &PredicationCode,
510                           unsigned &VPTPredicationCode, bool &CarrySetting,
511                           unsigned &ProcessorIMod, StringRef &ITMask);
512   void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
513                              StringRef FullInst, bool &CanAcceptCarrySet,
514                              bool &CanAcceptPredicationCode,
515                              bool &CanAcceptVPTPredicationCode);
516   bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
517 
518   void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
519                                      OperandVector &Operands);
520   bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
521 
522   bool isThumb() const {
523     // FIXME: Can tablegen auto-generate this?
524     return getSTI().hasFeature(ARM::ModeThumb);
525   }
526 
527   bool isThumbOne() const {
528     return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
529   }
530 
531   bool isThumbTwo() const {
532     return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
533   }
534 
535   bool hasThumb() const {
536     return getSTI().hasFeature(ARM::HasV4TOps);
537   }
538 
539   bool hasThumb2() const {
540     return getSTI().hasFeature(ARM::FeatureThumb2);
541   }
542 
543   bool hasV6Ops() const {
544     return getSTI().hasFeature(ARM::HasV6Ops);
545   }
546 
547   bool hasV6T2Ops() const {
548     return getSTI().hasFeature(ARM::HasV6T2Ops);
549   }
550 
551   bool hasV6MOps() const {
552     return getSTI().hasFeature(ARM::HasV6MOps);
553   }
554 
555   bool hasV7Ops() const {
556     return getSTI().hasFeature(ARM::HasV7Ops);
557   }
558 
559   bool hasV8Ops() const {
560     return getSTI().hasFeature(ARM::HasV8Ops);
561   }
562 
563   bool hasV8MBaseline() const {
564     return getSTI().hasFeature(ARM::HasV8MBaselineOps);
565   }
566 
567   bool hasV8MMainline() const {
568     return getSTI().hasFeature(ARM::HasV8MMainlineOps);
569   }
570   bool hasV8_1MMainline() const {
571     return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
572   }
573   bool hasMVE() const {
574     return getSTI().hasFeature(ARM::HasMVEIntegerOps);
575   }
576   bool hasMVEFloat() const {
577     return getSTI().hasFeature(ARM::HasMVEFloatOps);
578   }
579   bool hasCDE() const {
580     return getSTI().hasFeature(ARM::HasCDEOps);
581   }
582   bool has8MSecExt() const {
583     return getSTI().hasFeature(ARM::Feature8MSecExt);
584   }
585 
586   bool hasARM() const {
587     return !getSTI().hasFeature(ARM::FeatureNoARM);
588   }
589 
590   bool hasDSP() const {
591     return getSTI().hasFeature(ARM::FeatureDSP);
592   }
593 
594   bool hasD32() const {
595     return getSTI().hasFeature(ARM::FeatureD32);
596   }
597 
598   bool hasV8_1aOps() const {
599     return getSTI().hasFeature(ARM::HasV8_1aOps);
600   }
601 
602   bool hasRAS() const {
603     return getSTI().hasFeature(ARM::FeatureRAS);
604   }
605 
606   void SwitchMode() {
607     MCSubtargetInfo &STI = copySTI();
608     auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
609     setAvailableFeatures(FB);
610   }
611 
612   void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
613 
614   bool isMClass() const {
615     return getSTI().hasFeature(ARM::FeatureMClass);
616   }
617 
618   /// @name Auto-generated Match Functions
619   /// {
620 
621 #define GET_ASSEMBLER_HEADER
622 #include "ARMGenAsmMatcher.inc"
623 
624   /// }
625 
626   ParseStatus parseITCondCode(OperandVector &);
627   ParseStatus parseCoprocNumOperand(OperandVector &);
628   ParseStatus parseCoprocRegOperand(OperandVector &);
629   ParseStatus parseCoprocOptionOperand(OperandVector &);
630   ParseStatus parseMemBarrierOptOperand(OperandVector &);
631   ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
632   ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
633   ParseStatus parseProcIFlagsOperand(OperandVector &);
634   ParseStatus parseMSRMaskOperand(OperandVector &);
635   ParseStatus parseBankedRegOperand(OperandVector &);
636   ParseStatus parsePKHImm(OperandVector &O, StringRef Op, int Low, int High);
637   ParseStatus parsePKHLSLImm(OperandVector &O) {
638     return parsePKHImm(O, "lsl", 0, 31);
639   }
640   ParseStatus parsePKHASRImm(OperandVector &O) {
641     return parsePKHImm(O, "asr", 1, 32);
642   }
643   ParseStatus parseSetEndImm(OperandVector &);
644   ParseStatus parseShifterImm(OperandVector &);
645   ParseStatus parseRotImm(OperandVector &);
646   ParseStatus parseModImm(OperandVector &);
647   ParseStatus parseBitfield(OperandVector &);
648   ParseStatus parsePostIdxReg(OperandVector &);
649   ParseStatus parseAM3Offset(OperandVector &);
650   ParseStatus parseFPImm(OperandVector &);
651   ParseStatus parseVectorList(OperandVector &);
652   ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
653                               SMLoc &EndLoc);
654 
655   // Asm Match Converter Methods
656   void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
657   void cvtThumbBranches(MCInst &Inst, const OperandVector &);
658   void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
659 
660   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
661   bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
662   bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
663   bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
664   bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
665   bool isITBlockTerminator(MCInst &Inst) const;
666   void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
667   bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
668                         bool Load, bool ARMMode, bool Writeback);
669 
670 public:
671   enum ARMMatchResultTy {
672     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
673     Match_RequiresNotITBlock,
674     Match_RequiresV6,
675     Match_RequiresThumb2,
676     Match_RequiresV8,
677     Match_RequiresFlagSetting,
678 #define GET_OPERAND_DIAGNOSTIC_TYPES
679 #include "ARMGenAsmMatcher.inc"
680 
681   };
682 
683   ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
684                const MCInstrInfo &MII, const MCTargetOptions &Options)
685     : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
686     MCAsmParserExtension::Initialize(Parser);
687 
688     // Cache the MCRegisterInfo.
689     MRI = getContext().getRegisterInfo();
690 
691     // Initialize the set of available features.
692     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
693 
694     // Add build attributes based on the selected target.
695     if (AddBuildAttributes)
696       getTargetStreamer().emitTargetAttributes(STI);
697 
698     // Not in an ITBlock to start with.
699     ITState.CurPosition = ~0U;
700 
701     VPTState.CurPosition = ~0U;
702 
703     NextSymbolIsThumb = false;
704   }
705 
706   // Implementation of the MCTargetAsmParser interface:
707   bool parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
708                      SMLoc &EndLoc) override;
709   OperandMatchResultTy tryParseRegister(MCRegister &RegNo, SMLoc &StartLoc,
710                                         SMLoc &EndLoc) override;
711   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
712                         SMLoc NameLoc, OperandVector &Operands) override;
713   bool ParseDirective(AsmToken DirectiveID) override;
714 
715   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
716                                       unsigned Kind) override;
717   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
718 
719   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
720                                OperandVector &Operands, MCStreamer &Out,
721                                uint64_t &ErrorInfo,
722                                bool MatchingInlineAsm) override;
723   unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
724                             SmallVectorImpl<NearMissInfo> &NearMisses,
725                             bool MatchingInlineAsm, bool &EmitInITBlock,
726                             MCStreamer &Out);
727 
728   struct NearMissMessage {
729     SMLoc Loc;
730     SmallString<128> Message;
731   };
732 
733   const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
734 
735   void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
736                         SmallVectorImpl<NearMissMessage> &NearMissesOut,
737                         SMLoc IDLoc, OperandVector &Operands);
738   void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
739                         OperandVector &Operands);
740 
741   void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
742 
743   void onLabelParsed(MCSymbol *Symbol) override;
744 };
745 
746 /// ARMOperand - Instances of this class represent a parsed ARM machine
747 /// operand.
748 class ARMOperand : public MCParsedAsmOperand {
749   enum KindTy {
750     k_CondCode,
751     k_VPTPred,
752     k_CCOut,
753     k_ITCondMask,
754     k_CoprocNum,
755     k_CoprocReg,
756     k_CoprocOption,
757     k_Immediate,
758     k_MemBarrierOpt,
759     k_InstSyncBarrierOpt,
760     k_TraceSyncBarrierOpt,
761     k_Memory,
762     k_PostIndexRegister,
763     k_MSRMask,
764     k_BankedReg,
765     k_ProcIFlags,
766     k_VectorIndex,
767     k_Register,
768     k_RegisterList,
769     k_RegisterListWithAPSR,
770     k_DPRRegisterList,
771     k_SPRRegisterList,
772     k_FPSRegisterListWithVPR,
773     k_FPDRegisterListWithVPR,
774     k_VectorList,
775     k_VectorListAllLanes,
776     k_VectorListIndexed,
777     k_ShiftedRegister,
778     k_ShiftedImmediate,
779     k_ShifterImmediate,
780     k_RotateImmediate,
781     k_ModifiedImmediate,
782     k_ConstantPoolImmediate,
783     k_BitfieldDescriptor,
784     k_Token,
785   } Kind;
786 
787   SMLoc StartLoc, EndLoc, AlignmentLoc;
788   SmallVector<unsigned, 8> Registers;
789 
790   struct CCOp {
791     ARMCC::CondCodes Val;
792   };
793 
794   struct VCCOp {
795     ARMVCC::VPTCodes Val;
796   };
797 
798   struct CopOp {
799     unsigned Val;
800   };
801 
802   struct CoprocOptionOp {
803     unsigned Val;
804   };
805 
806   struct ITMaskOp {
807     unsigned Mask:4;
808   };
809 
810   struct MBOptOp {
811     ARM_MB::MemBOpt Val;
812   };
813 
814   struct ISBOptOp {
815     ARM_ISB::InstSyncBOpt Val;
816   };
817 
818   struct TSBOptOp {
819     ARM_TSB::TraceSyncBOpt Val;
820   };
821 
822   struct IFlagsOp {
823     ARM_PROC::IFlags Val;
824   };
825 
826   struct MMaskOp {
827     unsigned Val;
828   };
829 
830   struct BankedRegOp {
831     unsigned Val;
832   };
833 
834   struct TokOp {
835     const char *Data;
836     unsigned Length;
837   };
838 
839   struct RegOp {
840     unsigned RegNum;
841   };
842 
843   // A vector register list is a sequential list of 1 to 4 registers.
844   struct VectorListOp {
845     unsigned RegNum;
846     unsigned Count;
847     unsigned LaneIndex;
848     bool isDoubleSpaced;
849   };
850 
851   struct VectorIndexOp {
852     unsigned Val;
853   };
854 
855   struct ImmOp {
856     const MCExpr *Val;
857   };
858 
859   /// Combined record for all forms of ARM address expressions.
860   struct MemoryOp {
861     unsigned BaseRegNum;
862     // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
863     // was specified.
864     const MCExpr *OffsetImm;  // Offset immediate value
865     unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
866     ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
867     unsigned ShiftImm;        // shift for OffsetReg.
868     unsigned Alignment;       // 0 = no alignment specified
869     // n = alignment in bytes (2, 4, 8, 16, or 32)
870     unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
871   };
872 
873   struct PostIdxRegOp {
874     unsigned RegNum;
875     bool isAdd;
876     ARM_AM::ShiftOpc ShiftTy;
877     unsigned ShiftImm;
878   };
879 
880   struct ShifterImmOp {
881     bool isASR;
882     unsigned Imm;
883   };
884 
885   struct RegShiftedRegOp {
886     ARM_AM::ShiftOpc ShiftTy;
887     unsigned SrcReg;
888     unsigned ShiftReg;
889     unsigned ShiftImm;
890   };
891 
892   struct RegShiftedImmOp {
893     ARM_AM::ShiftOpc ShiftTy;
894     unsigned SrcReg;
895     unsigned ShiftImm;
896   };
897 
898   struct RotImmOp {
899     unsigned Imm;
900   };
901 
902   struct ModImmOp {
903     unsigned Bits;
904     unsigned Rot;
905   };
906 
907   struct BitfieldOp {
908     unsigned LSB;
909     unsigned Width;
910   };
911 
912   union {
913     struct CCOp CC;
914     struct VCCOp VCC;
915     struct CopOp Cop;
916     struct CoprocOptionOp CoprocOption;
917     struct MBOptOp MBOpt;
918     struct ISBOptOp ISBOpt;
919     struct TSBOptOp TSBOpt;
920     struct ITMaskOp ITMask;
921     struct IFlagsOp IFlags;
922     struct MMaskOp MMask;
923     struct BankedRegOp BankedReg;
924     struct TokOp Tok;
925     struct RegOp Reg;
926     struct VectorListOp VectorList;
927     struct VectorIndexOp VectorIndex;
928     struct ImmOp Imm;
929     struct MemoryOp Memory;
930     struct PostIdxRegOp PostIdxReg;
931     struct ShifterImmOp ShifterImm;
932     struct RegShiftedRegOp RegShiftedReg;
933     struct RegShiftedImmOp RegShiftedImm;
934     struct RotImmOp RotImm;
935     struct ModImmOp ModImm;
936     struct BitfieldOp Bitfield;
937   };
938 
939 public:
940   ARMOperand(KindTy K) : Kind(K) {}
941 
942   /// getStartLoc - Get the location of the first token of this operand.
943   SMLoc getStartLoc() const override { return StartLoc; }
944 
945   /// getEndLoc - Get the location of the last token of this operand.
946   SMLoc getEndLoc() const override { return EndLoc; }
947 
948   /// getLocRange - Get the range between the first and last token of this
949   /// operand.
950   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
951 
952   /// getAlignmentLoc - Get the location of the Alignment token of this operand.
953   SMLoc getAlignmentLoc() const {
954     assert(Kind == k_Memory && "Invalid access!");
955     return AlignmentLoc;
956   }
957 
958   ARMCC::CondCodes getCondCode() const {
959     assert(Kind == k_CondCode && "Invalid access!");
960     return CC.Val;
961   }
962 
963   ARMVCC::VPTCodes getVPTPred() const {
964     assert(isVPTPred() && "Invalid access!");
965     return VCC.Val;
966   }
967 
968   unsigned getCoproc() const {
969     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
970     return Cop.Val;
971   }
972 
973   StringRef getToken() const {
974     assert(Kind == k_Token && "Invalid access!");
975     return StringRef(Tok.Data, Tok.Length);
976   }
977 
978   unsigned getReg() const override {
979     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
980     return Reg.RegNum;
981   }
982 
983   const SmallVectorImpl<unsigned> &getRegList() const {
984     assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
985             Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
986             Kind == k_FPSRegisterListWithVPR ||
987             Kind == k_FPDRegisterListWithVPR) &&
988            "Invalid access!");
989     return Registers;
990   }
991 
992   const MCExpr *getImm() const {
993     assert(isImm() && "Invalid access!");
994     return Imm.Val;
995   }
996 
997   const MCExpr *getConstantPoolImm() const {
998     assert(isConstantPoolImm() && "Invalid access!");
999     return Imm.Val;
1000   }
1001 
1002   unsigned getVectorIndex() const {
1003     assert(Kind == k_VectorIndex && "Invalid access!");
1004     return VectorIndex.Val;
1005   }
1006 
1007   ARM_MB::MemBOpt getMemBarrierOpt() const {
1008     assert(Kind == k_MemBarrierOpt && "Invalid access!");
1009     return MBOpt.Val;
1010   }
1011 
1012   ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
1013     assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
1014     return ISBOpt.Val;
1015   }
1016 
1017   ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
1018     assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
1019     return TSBOpt.Val;
1020   }
1021 
1022   ARM_PROC::IFlags getProcIFlags() const {
1023     assert(Kind == k_ProcIFlags && "Invalid access!");
1024     return IFlags.Val;
1025   }
1026 
1027   unsigned getMSRMask() const {
1028     assert(Kind == k_MSRMask && "Invalid access!");
1029     return MMask.Val;
1030   }
1031 
1032   unsigned getBankedReg() const {
1033     assert(Kind == k_BankedReg && "Invalid access!");
1034     return BankedReg.Val;
1035   }
1036 
1037   bool isCoprocNum() const { return Kind == k_CoprocNum; }
1038   bool isCoprocReg() const { return Kind == k_CoprocReg; }
1039   bool isCoprocOption() const { return Kind == k_CoprocOption; }
1040   bool isCondCode() const { return Kind == k_CondCode; }
1041   bool isVPTPred() const { return Kind == k_VPTPred; }
1042   bool isCCOut() const { return Kind == k_CCOut; }
1043   bool isITMask() const { return Kind == k_ITCondMask; }
1044   bool isITCondCode() const { return Kind == k_CondCode; }
1045   bool isImm() const override {
1046     return Kind == k_Immediate;
1047   }
1048 
1049   bool isARMBranchTarget() const {
1050     if (!isImm()) return false;
1051 
1052     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1053       return CE->getValue() % 4 == 0;
1054     return true;
1055   }
1056 
1057 
1058   bool isThumbBranchTarget() const {
1059     if (!isImm()) return false;
1060 
1061     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1062       return CE->getValue() % 2 == 0;
1063     return true;
1064   }
1065 
  // checks whether this operand is an unsigned offset which fits in a field
  // of specified width and is scaled by a specific number of bits
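  // For example, isUnsignedOffset<8, 2>() accepts any multiple of 4 in the
  // range [0, 1020].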
1068   template<unsigned width, unsigned scale>
1069   bool isUnsignedOffset() const {
1070     if (!isImm()) return false;
1071     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1072     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1073       int64_t Val = CE->getValue();
1074       int64_t Align = 1LL << scale;
1075       int64_t Max = Align * ((1LL << width) - 1);
1076       return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1077     }
1078     return false;
1079   }
1080 
  // checks whether this operand is a signed offset which fits in a field
  // of specified width and is scaled by a specific number of bits
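  // For example, isSignedOffset<8, 1>() accepts any even value in the range
  // [-256, 254].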
1083   template<unsigned width, unsigned scale>
1084   bool isSignedOffset() const {
1085     if (!isImm()) return false;
1086     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1087     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1088       int64_t Val = CE->getValue();
1089       int64_t Align = 1LL << scale;
1090       int64_t Max = Align * ((1LL << (width-1)) - 1);
1091       int64_t Min = -Align * (1LL << (width-1));
1092       return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1093     }
1094     return false;
1095   }
1096 
1097   // checks whether this operand is an offset suitable for the LE /
1098   // LETP instructions in Arm v8.1M
1099   bool isLEOffset() const {
1100     if (!isImm()) return false;
1101     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1102     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1103       int64_t Val = CE->getValue();
1104       return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1105     }
1106     return false;
1107   }
1108 
  // checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset has an 8-bit magnitude and is scaled by 4 (two
  // bits of shift), i.e. it is a multiple of 4 in the range [0, 1020].
  // Textually it may be either [pc, #imm], #imm or a relocatable expression...
1113   bool isThumbMemPC() const {
1114     int64_t Val = 0;
1115     if (isImm()) {
1116       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1117       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1118       if (!CE) return false;
1119       Val = CE->getValue();
1120     }
1121     else if (isGPRMem()) {
      if (!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if (Memory.BaseRegNum != ARM::PC) return false;
1124       if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
1125         Val = CE->getValue();
1126       else
1127         return false;
1128     }
1129     else return false;
1130     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1131   }
1132 
1133   bool isFPImm() const {
1134     if (!isImm()) return false;
1135     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1136     if (!CE) return false;
1137     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1138     return Val != -1;
1139   }
1140 
1141   template<int64_t N, int64_t M>
1142   bool isImmediate() const {
1143     if (!isImm()) return false;
1144     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1145     if (!CE) return false;
1146     int64_t Value = CE->getValue();
1147     return Value >= N && Value <= M;
1148   }
1149 
1150   template<int64_t N, int64_t M>
1151   bool isImmediateS4() const {
1152     if (!isImm()) return false;
1153     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1154     if (!CE) return false;
1155     int64_t Value = CE->getValue();
1156     return ((Value & 3) == 0) && Value >= N && Value <= M;
1157   }
1158   template<int64_t N, int64_t M>
1159   bool isImmediateS2() const {
1160     if (!isImm()) return false;
1161     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1162     if (!CE) return false;
1163     int64_t Value = CE->getValue();
1164     return ((Value & 1) == 0) && Value >= N && Value <= M;
1165   }
1166   bool isFBits16() const {
1167     return isImmediate<0, 17>();
1168   }
1169   bool isFBits32() const {
1170     return isImmediate<1, 33>();
1171   }
1172   bool isImm8s4() const {
1173     return isImmediateS4<-1020, 1020>();
1174   }
1175   bool isImm7s4() const {
1176     return isImmediateS4<-508, 508>();
1177   }
1178   bool isImm7Shift0() const {
1179     return isImmediate<-127, 127>();
1180   }
1181   bool isImm7Shift1() const {
1182     return isImmediateS2<-255, 255>();
1183   }
1184   bool isImm7Shift2() const {
1185     return isImmediateS4<-511, 511>();
1186   }
1187   bool isImm7() const {
1188     return isImmediate<-127, 127>();
1189   }
1190   bool isImm0_1020s4() const {
1191     return isImmediateS4<0, 1020>();
1192   }
1193   bool isImm0_508s4() const {
1194     return isImmediateS4<0, 508>();
1195   }
1196   bool isImm0_508s4Neg() const {
1197     if (!isImm()) return false;
1198     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1199     if (!CE) return false;
1200     int64_t Value = -CE->getValue();
1201     // explicitly exclude zero. we want that to use the normal 0_508 version.
1202     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1203   }
1204 
1205   bool isImm0_4095Neg() const {
1206     if (!isImm()) return false;
1207     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1208     if (!CE) return false;
1209     // isImm0_4095Neg is used with 32-bit immediates only.
1210     // 32-bit immediates are zero extended to 64-bit when parsed,
1211     // thus simple -CE->getValue() results in a big negative number,
1212     // not a small positive number as intended
1213     if ((CE->getValue() >> 32) > 0) return false;
1214     uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1215     return Value > 0 && Value < 4096;
1216   }
1217 
1218   bool isImm0_7() const {
1219     return isImmediate<0, 7>();
1220   }
1221 
1222   bool isImm1_16() const {
1223     return isImmediate<1, 16>();
1224   }
1225 
1226   bool isImm1_32() const {
1227     return isImmediate<1, 32>();
1228   }
1229 
1230   bool isImm8_255() const {
1231     return isImmediate<8, 255>();
1232   }
1233 
1234   bool isImm0_255Expr() const {
1235     if (!isImm())
1236       return false;
1237     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1238     // If it's not a constant expression, it'll generate a fixup and be
1239     // handled later.
1240     if (!CE)
1241       return true;
1242     int64_t Value = CE->getValue();
1243     return isUInt<8>(Value);
1244   }
1245 
1246   bool isImm256_65535Expr() const {
1247     if (!isImm()) return false;
1248     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1249     // If it's not a constant expression, it'll generate a fixup and be
1250     // handled later.
1251     if (!CE) return true;
1252     int64_t Value = CE->getValue();
1253     return Value >= 256 && Value < 65536;
1254   }
1255 
1256   bool isImm0_65535Expr() const {
1257     if (!isImm()) return false;
1258     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1259     // If it's not a constant expression, it'll generate a fixup and be
1260     // handled later.
1261     if (!CE) return true;
1262     int64_t Value = CE->getValue();
1263     return Value >= 0 && Value < 65536;
1264   }
1265 
1266   bool isImm24bit() const {
1267     return isImmediate<0, 0xffffff + 1>();
1268   }
1269 
1270   bool isImmThumbSR() const {
1271     return isImmediate<1, 33>();
1272   }
1273 
1274   bool isPKHLSLImm() const {
1275     return isImmediate<0, 32>();
1276   }
1277 
1278   bool isPKHASRImm() const {
1279     return isImmediate<0, 33>();
1280   }
1281 
1282   bool isAdrLabel() const {
1283     // If we have an immediate that's not a constant, treat it as a label
1284     // reference needing a fixup.
1285     if (isImm() && !isa<MCConstantExpr>(getImm()))
1286       return true;
1287 
1288     // If it is a constant, it must fit into a modified immediate encoding.
1289     if (!isImm()) return false;
1290     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1291     if (!CE) return false;
1292     int64_t Value = CE->getValue();
1293     return (ARM_AM::getSOImmVal(Value) != -1 ||
1294             ARM_AM::getSOImmVal(-Value) != -1);
1295   }
1296 
1297   bool isT2SOImm() const {
1298     // If we have an immediate that's not a constant, treat it as an expression
1299     // needing a fixup.
1300     if (isImm() && !isa<MCConstantExpr>(getImm())) {
1301       // We want to avoid matching :upper16: and :lower16: as we want these
1302       // expressions to match in isImm0_65535Expr()
1303       const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1304       return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1305                              ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1306     }
1307     if (!isImm()) return false;
1308     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1309     if (!CE) return false;
1310     int64_t Value = CE->getValue();
1311     return ARM_AM::getT2SOImmVal(Value) != -1;
1312   }
1313 
1314   bool isT2SOImmNot() const {
1315     if (!isImm()) return false;
1316     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1317     if (!CE) return false;
1318     int64_t Value = CE->getValue();
1319     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1320       ARM_AM::getT2SOImmVal(~Value) != -1;
1321   }
1322 
1323   bool isT2SOImmNeg() const {
1324     if (!isImm()) return false;
1325     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1326     if (!CE) return false;
1327     int64_t Value = CE->getValue();
1328     // Only use this when not representable as a plain so_imm.
1329     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1330       ARM_AM::getT2SOImmVal(-Value) != -1;
1331   }
1332 
1333   bool isSetEndImm() const {
1334     if (!isImm()) return false;
1335     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1336     if (!CE) return false;
1337     int64_t Value = CE->getValue();
1338     return Value == 1 || Value == 0;
1339   }
1340 
1341   bool isReg() const override { return Kind == k_Register; }
1342   bool isRegList() const { return Kind == k_RegisterList; }
1343   bool isRegListWithAPSR() const {
1344     return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1345   }
1346   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1347   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1348   bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
1349   bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
1350   bool isToken() const override { return Kind == k_Token; }
1351   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1352   bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1353   bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1354   bool isMem() const override {
1355       return isGPRMem() || isMVEMem();
1356   }
1357   bool isMVEMem() const {
1358     if (Kind != k_Memory)
1359       return false;
1360     if (Memory.BaseRegNum &&
1361         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
1362         !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
1363       return false;
1364     if (Memory.OffsetRegNum &&
1365         !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1366             Memory.OffsetRegNum))
1367       return false;
1368     return true;
1369   }
1370   bool isGPRMem() const {
1371     if (Kind != k_Memory)
1372       return false;
1373     if (Memory.BaseRegNum &&
1374         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1375       return false;
1376     if (Memory.OffsetRegNum &&
1377         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1378       return false;
1379     return true;
1380   }
1381   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1382   bool isRegShiftedReg() const {
1383     return Kind == k_ShiftedRegister &&
1384            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1385                RegShiftedReg.SrcReg) &&
1386            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1387                RegShiftedReg.ShiftReg);
1388   }
1389   bool isRegShiftedImm() const {
1390     return Kind == k_ShiftedImmediate &&
1391            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1392                RegShiftedImm.SrcReg);
1393   }
1394   bool isRotImm() const { return Kind == k_RotateImmediate; }
1395 
1396   template<unsigned Min, unsigned Max>
1397   bool isPowerTwoInRange() const {
1398     if (!isImm()) return false;
1399     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1400     if (!CE) return false;
1401     int64_t Value = CE->getValue();
1402     return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
1403            Value <= Max;
1404   }
1405   bool isModImm() const { return Kind == k_ModifiedImmediate; }
1406 
1407   bool isModImmNot() const {
1408     if (!isImm()) return false;
1409     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1410     if (!CE) return false;
1411     int64_t Value = CE->getValue();
1412     return ARM_AM::getSOImmVal(~Value) != -1;
1413   }
1414 
1415   bool isModImmNeg() const {
1416     if (!isImm()) return false;
1417     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1418     if (!CE) return false;
1419     int64_t Value = CE->getValue();
1420     return ARM_AM::getSOImmVal(Value) == -1 &&
1421       ARM_AM::getSOImmVal(-Value) != -1;
1422   }
1423 
1424   bool isThumbModImmNeg1_7() const {
1425     if (!isImm()) return false;
1426     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1427     if (!CE) return false;
1428     int32_t Value = -(int32_t)CE->getValue();
1429     return 0 < Value && Value < 8;
1430   }
1431 
1432   bool isThumbModImmNeg8_255() const {
1433     if (!isImm()) return false;
1434     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1435     if (!CE) return false;
1436     int32_t Value = -(int32_t)CE->getValue();
1437     return 7 < Value && Value < 256;
1438   }
1439 
1440   bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1441   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1442   bool isPostIdxRegShifted() const {
1443     return Kind == k_PostIndexRegister &&
1444            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1445   }
1446   bool isPostIdxReg() const {
1447     return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1448   }
1449   bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1450     if (!isGPRMem())
1451       return false;
1452     // No offset of any kind.
1453     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1454      (alignOK || Memory.Alignment == Alignment);
1455   }
1456   bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1457     if (!isGPRMem())
1458       return false;
1459 
1460     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1461             Memory.BaseRegNum))
1462       return false;
1463 
1464     // No offset of any kind.
1465     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1466      (alignOK || Memory.Alignment == Alignment);
1467   }
1468   bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1469     if (!isGPRMem())
1470       return false;
1471 
1472     if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1473             Memory.BaseRegNum))
1474       return false;
1475 
1476     // No offset of any kind.
1477     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1478      (alignOK || Memory.Alignment == Alignment);
1479   }
1480   bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
1481     if (!isGPRMem())
1482       return false;
1483 
1484     if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
1485             Memory.BaseRegNum))
1486       return false;
1487 
1488     // No offset of any kind.
1489     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1490      (alignOK || Memory.Alignment == Alignment);
1491   }
1492   bool isMemPCRelImm12() const {
1493     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1494       return false;
1495     // Base register must be PC.
1496     if (Memory.BaseRegNum != ARM::PC)
1497       return false;
1498     // Immediate offset in range [-4095, 4095].
1499     if (!Memory.OffsetImm) return true;
1500     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1501       int64_t Val = CE->getValue();
1502       return (Val > -4096 && Val < 4096) ||
1503              (Val == std::numeric_limits<int32_t>::min());
1504     }
1505     return false;
1506   }
1507 
1508   bool isAlignedMemory() const {
1509     return isMemNoOffset(true);
1510   }
1511 
1512   bool isAlignedMemoryNone() const {
1513     return isMemNoOffset(false, 0);
1514   }
1515 
1516   bool isDupAlignedMemoryNone() const {
1517     return isMemNoOffset(false, 0);
1518   }
1519 
1520   bool isAlignedMemory16() const {
1521     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1522       return true;
1523     return isMemNoOffset(false, 0);
1524   }
1525 
1526   bool isDupAlignedMemory16() const {
1527     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1528       return true;
1529     return isMemNoOffset(false, 0);
1530   }
1531 
1532   bool isAlignedMemory32() const {
1533     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1534       return true;
1535     return isMemNoOffset(false, 0);
1536   }
1537 
1538   bool isDupAlignedMemory32() const {
1539     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1540       return true;
1541     return isMemNoOffset(false, 0);
1542   }
1543 
1544   bool isAlignedMemory64() const {
1545     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1546       return true;
1547     return isMemNoOffset(false, 0);
1548   }
1549 
1550   bool isDupAlignedMemory64() const {
1551     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1552       return true;
1553     return isMemNoOffset(false, 0);
1554   }
1555 
1556   bool isAlignedMemory64or128() const {
1557     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1558       return true;
1559     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1560       return true;
1561     return isMemNoOffset(false, 0);
1562   }
1563 
1564   bool isDupAlignedMemory64or128() const {
1565     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1566       return true;
1567     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1568       return true;
1569     return isMemNoOffset(false, 0);
1570   }
1571 
1572   bool isAlignedMemory64or128or256() const {
1573     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1574       return true;
1575     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1576       return true;
1577     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1578       return true;
1579     return isMemNoOffset(false, 0);
1580   }
1581 
1582   bool isAddrMode2() const {
1583     if (!isGPRMem() || Memory.Alignment != 0) return false;
1584     // Check for register offset.
1585     if (Memory.OffsetRegNum) return true;
1586     // Immediate offset in range [-4095, 4095].
1587     if (!Memory.OffsetImm) return true;
1588     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1589       int64_t Val = CE->getValue();
1590       return Val > -4096 && Val < 4096;
1591     }
1592     return false;
1593   }
1594 
1595   bool isAM2OffsetImm() const {
1596     if (!isImm()) return false;
1597     // Immediate offset in range [-4095, 4095].
1598     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1599     if (!CE) return false;
1600     int64_t Val = CE->getValue();
1601     return (Val == std::numeric_limits<int32_t>::min()) ||
1602            (Val > -4096 && Val < 4096);
1603   }
1604 
1605   bool isAddrMode3() const {
1606     // If we have an immediate that's not a constant, treat it as a label
1607     // reference needing a fixup. If it is a constant, it's something else
1608     // and we reject it.
1609     if (isImm() && !isa<MCConstantExpr>(getImm()))
1610       return true;
1611     if (!isGPRMem() || Memory.Alignment != 0) return false;
1612     // No shifts are legal for AM3.
1613     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1614     // Check for register offset.
1615     if (Memory.OffsetRegNum) return true;
1616     // Immediate offset in range [-255, 255].
1617     if (!Memory.OffsetImm) return true;
1618     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1619       int64_t Val = CE->getValue();
1620       // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
1621       // we have to check for this too.
1622       return (Val > -256 && Val < 256) ||
1623              Val == std::numeric_limits<int32_t>::min();
1624     }
1625     return false;
1626   }
1627 
1628   bool isAM3Offset() const {
1629     if (isPostIdxReg())
1630       return true;
1631     if (!isImm())
1632       return false;
1633     // Immediate offset in range [-255, 255].
1634     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1635     if (!CE) return false;
1636     int64_t Val = CE->getValue();
1637     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1638     return (Val > -256 && Val < 256) ||
1639            Val == std::numeric_limits<int32_t>::min();
1640   }
1641 
1642   bool isAddrMode5() const {
1643     // If we have an immediate that's not a constant, treat it as a label
1644     // reference needing a fixup. If it is a constant, it's something else
1645     // and we reject it.
1646     if (isImm() && !isa<MCConstantExpr>(getImm()))
1647       return true;
1648     if (!isGPRMem() || Memory.Alignment != 0) return false;
1649     // Check for register offset.
1650     if (Memory.OffsetRegNum) return false;
1651     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1652     if (!Memory.OffsetImm) return true;
1653     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1654       int64_t Val = CE->getValue();
1655       return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1656              Val == std::numeric_limits<int32_t>::min();
1657     }
1658     return false;
1659   }
1660 
1661   bool isAddrMode5FP16() const {
1662     // If we have an immediate that's not a constant, treat it as a label
1663     // reference needing a fixup. If it is a constant, it's something else
1664     // and we reject it.
1665     if (isImm() && !isa<MCConstantExpr>(getImm()))
1666       return true;
1667     if (!isGPRMem() || Memory.Alignment != 0) return false;
1668     // Check for register offset.
1669     if (Memory.OffsetRegNum) return false;
1670     // Immediate offset in range [-510, 510] and a multiple of 2.
1671     if (!Memory.OffsetImm) return true;
1672     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1673       int64_t Val = CE->getValue();
1674       return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1675              Val == std::numeric_limits<int32_t>::min();
1676     }
1677     return false;
1678   }
1679 
1680   bool isMemTBB() const {
1681     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1682         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1683       return false;
1684     return true;
1685   }
1686 
1687   bool isMemTBH() const {
1688     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1689         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1690         Memory.Alignment != 0)
1691       return false;
1692     return true;
1693   }
1694 
1695   bool isMemRegOffset() const {
1696     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1697       return false;
1698     return true;
1699   }
1700 
1701   bool isT2MemRegOffset() const {
1702     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1703         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1704       return false;
1705     // Only lsl #{0, 1, 2, 3} allowed.
1706     if (Memory.ShiftType == ARM_AM::no_shift)
1707       return true;
1708     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1709       return false;
1710     return true;
1711   }
1712 
1713   bool isMemThumbRR() const {
1714     // Thumb reg+reg addressing is simple. Just two registers, a base and
1715     // an offset. No shifts, negations or any other complicating factors.
1716     if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1717         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1718       return false;
1719     return isARMLowRegister(Memory.BaseRegNum) &&
1720            (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1721   }
1722 
1723   bool isMemThumbRIs4() const {
1724     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1725         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1726       return false;
1727     // Immediate offset, multiple of 4 in range [0, 124].
1728     if (!Memory.OffsetImm) return true;
1729     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1730       int64_t Val = CE->getValue();
1731       return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1732     }
1733     return false;
1734   }
1735 
1736   bool isMemThumbRIs2() const {
1737     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1738         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1739       return false;
1740     // Immediate offset, multiple of 2 in range [0, 62].
1741     if (!Memory.OffsetImm) return true;
1742     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1743       int64_t Val = CE->getValue();
1744       return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1745     }
1746     return false;
1747   }
1748 
1749   bool isMemThumbRIs1() const {
1750     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1751         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1752       return false;
1753     // Immediate offset in range [0, 31].
1754     if (!Memory.OffsetImm) return true;
1755     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1756       int64_t Val = CE->getValue();
1757       return Val >= 0 && Val <= 31;
1758     }
1759     return false;
1760   }
1761 
1762   bool isMemThumbSPI() const {
1763     if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
1764         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1765       return false;
1766     // Immediate offset, multiple of 4 in range [0, 1020].
1767     if (!Memory.OffsetImm) return true;
1768     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1769       int64_t Val = CE->getValue();
1770       return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1771     }
1772     return false;
1773   }
1774 
1775   bool isMemImm8s4Offset() const {
1776     // If we have an immediate that's not a constant, treat it as a label
1777     // reference needing a fixup. If it is a constant, it's something else
1778     // and we reject it.
1779     if (isImm() && !isa<MCConstantExpr>(getImm()))
1780       return true;
1781     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1782       return false;
1783     // Immediate offset a multiple of 4 in range [-1020, 1020].
1784     if (!Memory.OffsetImm) return true;
1785     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1786       int64_t Val = CE->getValue();
1787       // Special case, #-0 is std::numeric_limits<int32_t>::min().
1788       return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1789              Val == std::numeric_limits<int32_t>::min();
1790     }
1791     return false;
1792   }
1793 
1794   bool isMemImm7s4Offset() const {
1795     // If we have an immediate that's not a constant, treat it as a label
1796     // reference needing a fixup. If it is a constant, it's something else
1797     // and we reject it.
1798     if (isImm() && !isa<MCConstantExpr>(getImm()))
1799       return true;
1800     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1801         !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1802             Memory.BaseRegNum))
1803       return false;
1804     // Immediate offset a multiple of 4 in range [-508, 508].
1805     if (!Memory.OffsetImm) return true;
1806     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1807       int64_t Val = CE->getValue();
1808       // Special case, #-0 is INT32_MIN.
1809       return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1810     }
1811     return false;
1812   }
1813 
1814   bool isMemImm0_1020s4Offset() const {
1815     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1816       return false;
1817     // Immediate offset a multiple of 4 in range [0, 1020].
1818     if (!Memory.OffsetImm) return true;
1819     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1820       int64_t Val = CE->getValue();
1821       return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1822     }
1823     return false;
1824   }
1825 
1826   bool isMemImm8Offset() const {
1827     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1828       return false;
1829     // Base reg of PC isn't allowed for these encodings.
1830     if (Memory.BaseRegNum == ARM::PC) return false;
1831     // Immediate offset in range [-255, 255].
1832     if (!Memory.OffsetImm) return true;
1833     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1834       int64_t Val = CE->getValue();
1835       return (Val == std::numeric_limits<int32_t>::min()) ||
1836              (Val > -256 && Val < 256);
1837     }
1838     return false;
1839   }
1840 
1841   template<unsigned Bits, unsigned RegClassID>
1842   bool isMemImm7ShiftedOffset() const {
1843     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1844         !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
1845       return false;
1846 
1847     // Expect an immediate offset equal to an element of the range
1848     // [-127, 127], shifted left by Bits.
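         // For illustration (derived from the checks below): with Bits == 1
         // this accepts multiples of 2 in [-254, 254], and with Bits == 2
         // multiples of 4 in [-508, 508], plus the special INT32_MIN "#-0"
         // encoding handled below.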
1849 
1850     if (!Memory.OffsetImm) return true;
1851     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1852       int64_t Val = CE->getValue();
1853 
1854       // INT32_MIN is a special-case value (indicating the encoding with
1855       // zero offset and the subtract bit set)
1856       if (Val == INT32_MIN)
1857         return true;
1858 
1859       unsigned Divisor = 1U << Bits;
1860 
1861       // Check that the low bits are zero
1862       if (Val % Divisor != 0)
1863         return false;
1864 
1865       // Check that the remaining offset is within range.
1866       Val /= Divisor;
1867       return (Val >= -127 && Val <= 127);
1868     }
1869     return false;
1870   }
1871 
1872   template <int shift> bool isMemRegRQOffset() const {
1873     if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
1874       return false;
1875 
1876     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1877             Memory.BaseRegNum))
1878       return false;
1879     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1880             Memory.OffsetRegNum))
1881       return false;
1882 
1883     if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
1884       return false;
1885 
1886     if (shift > 0 &&
1887         (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
1888       return false;
1889 
1890     return true;
1891   }
1892 
1893   template <int shift> bool isMemRegQOffset() const {
1894     if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1895       return false;
1896 
1897     if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1898             Memory.BaseRegNum))
1899       return false;
1900 
1901     if (!Memory.OffsetImm)
1902       return true;
1903     static_assert(shift < 56,
1904                   "so that we never shift by a value higher than 62");
1905     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1906       int64_t Val = CE->getValue();
1907 
1908       // The value must be a multiple of (1 << shift)
1909       if ((Val & ((1U << shift) - 1)) != 0)
1910         return false;
1911 
1912       // And be in the right range, depending on the amount that it is shifted
1913       // by. A shift of 0 corresponds to 7 unsigned bits; the sign bit is set
1914       // separately.
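           // For illustration: with shift == 2, Range == (1 << 9) - 1 == 511,
           // so constant offsets that are multiples of 4 in [-508, 508] are
           // accepted (in addition to the INT32_MIN "#-0" case above).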
1915       int64_t Range = (1U << (7 + shift)) - 1;
1916       return (Val == INT32_MIN) || (Val > -Range && Val < Range);
1917     }
1918     return false;
1919   }
1920 
1921   bool isMemPosImm8Offset() const {
1922     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1923       return false;
1924     // Immediate offset in range [0, 255].
1925     if (!Memory.OffsetImm) return true;
1926     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1927       int64_t Val = CE->getValue();
1928       return Val >= 0 && Val < 256;
1929     }
1930     return false;
1931   }
1932 
1933   bool isMemNegImm8Offset() const {
1934     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1935       return false;
1936     // Base reg of PC isn't allowed for these encodings.
1937     if (Memory.BaseRegNum == ARM::PC) return false;
1938     // Immediate offset in range [-255, -1].
1939     if (!Memory.OffsetImm) return false;
1940     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1941       int64_t Val = CE->getValue();
1942       return (Val == std::numeric_limits<int32_t>::min()) ||
1943              (Val > -256 && Val < 0);
1944     }
1945     return false;
1946   }
1947 
1948   bool isMemUImm12Offset() const {
1949     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1950       return false;
1951     // Immediate offset in range [0, 4095].
1952     if (!Memory.OffsetImm) return true;
1953     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1954       int64_t Val = CE->getValue();
1955       return (Val >= 0 && Val < 4096);
1956     }
1957     return false;
1958   }
1959 
1960   bool isMemImm12Offset() const {
1961     // If we have an immediate that's not a constant, treat it as a label
1962     // reference needing a fixup. If it is a constant, it's something else
1963     // and we reject it.
1964 
1965     if (isImm() && !isa<MCConstantExpr>(getImm()))
1966       return true;
1967 
1968     if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1969       return false;
1970     // Immediate offset in range [-4095, 4095].
1971     if (!Memory.OffsetImm) return true;
1972     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1973       int64_t Val = CE->getValue();
1974       return (Val > -4096 && Val < 4096) ||
1975              (Val == std::numeric_limits<int32_t>::min());
1976     }
1977     // If we have an immediate that's not a constant, treat it as a
1978     // symbolic expression needing a fixup.
1979     return true;
1980   }
1981 
1982   bool isConstPoolAsmImm() const {
1983     // Delay processing of Constant Pool Immediate, this will turn into
1984     // a constant. Match no other operand
1985     return (isConstantPoolImm());
1986   }
1987 
1988   bool isPostIdxImm8() const {
1989     if (!isImm()) return false;
1990     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1991     if (!CE) return false;
1992     int64_t Val = CE->getValue();
1993     return (Val > -256 && Val < 256) ||
1994            (Val == std::numeric_limits<int32_t>::min());
1995   }
1996 
1997   bool isPostIdxImm8s4() const {
1998     if (!isImm()) return false;
1999     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2000     if (!CE) return false;
2001     int64_t Val = CE->getValue();
2002     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2003            (Val == std::numeric_limits<int32_t>::min());
2004   }
2005 
2006   bool isMSRMask() const { return Kind == k_MSRMask; }
2007   bool isBankedReg() const { return Kind == k_BankedReg; }
2008   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2009 
2010   // NEON operands.
2011   bool isSingleSpacedVectorList() const {
2012     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2013   }
2014 
2015   bool isDoubleSpacedVectorList() const {
2016     return Kind == k_VectorList && VectorList.isDoubleSpaced;
2017   }
2018 
2019   bool isVecListOneD() const {
2020     if (!isSingleSpacedVectorList()) return false;
2021     return VectorList.Count == 1;
2022   }
2023 
2024   bool isVecListTwoMQ() const {
2025     return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2026            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2027                VectorList.RegNum);
2028   }
2029 
2030   bool isVecListDPair() const {
2031     if (!isSingleSpacedVectorList()) return false;
2032     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2033               .contains(VectorList.RegNum));
2034   }
2035 
2036   bool isVecListThreeD() const {
2037     if (!isSingleSpacedVectorList()) return false;
2038     return VectorList.Count == 3;
2039   }
2040 
2041   bool isVecListFourD() const {
2042     if (!isSingleSpacedVectorList()) return false;
2043     return VectorList.Count == 4;
2044   }
2045 
2046   bool isVecListDPairSpaced() const {
2047     if (Kind != k_VectorList) return false;
2048     if (isSingleSpacedVectorList()) return false;
2049     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2050               .contains(VectorList.RegNum));
2051   }
2052 
2053   bool isVecListThreeQ() const {
2054     if (!isDoubleSpacedVectorList()) return false;
2055     return VectorList.Count == 3;
2056   }
2057 
2058   bool isVecListFourQ() const {
2059     if (!isDoubleSpacedVectorList()) return false;
2060     return VectorList.Count == 4;
2061   }
2062 
2063   bool isVecListFourMQ() const {
2064     return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2065            ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2066                VectorList.RegNum);
2067   }
2068 
2069   bool isSingleSpacedVectorAllLanes() const {
2070     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2071   }
2072 
2073   bool isDoubleSpacedVectorAllLanes() const {
2074     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2075   }
2076 
2077   bool isVecListOneDAllLanes() const {
2078     if (!isSingleSpacedVectorAllLanes()) return false;
2079     return VectorList.Count == 1;
2080   }
2081 
2082   bool isVecListDPairAllLanes() const {
2083     if (!isSingleSpacedVectorAllLanes()) return false;
2084     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2085               .contains(VectorList.RegNum));
2086   }
2087 
2088   bool isVecListDPairSpacedAllLanes() const {
2089     if (!isDoubleSpacedVectorAllLanes()) return false;
2090     return VectorList.Count == 2;
2091   }
2092 
2093   bool isVecListThreeDAllLanes() const {
2094     if (!isSingleSpacedVectorAllLanes()) return false;
2095     return VectorList.Count == 3;
2096   }
2097 
2098   bool isVecListThreeQAllLanes() const {
2099     if (!isDoubleSpacedVectorAllLanes()) return false;
2100     return VectorList.Count == 3;
2101   }
2102 
2103   bool isVecListFourDAllLanes() const {
2104     if (!isSingleSpacedVectorAllLanes()) return false;
2105     return VectorList.Count == 4;
2106   }
2107 
2108   bool isVecListFourQAllLanes() const {
2109     if (!isDoubleSpacedVectorAllLanes()) return false;
2110     return VectorList.Count == 4;
2111   }
2112 
2113   bool isSingleSpacedVectorIndexed() const {
2114     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2115   }
2116 
2117   bool isDoubleSpacedVectorIndexed() const {
2118     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2119   }
2120 
2121   bool isVecListOneDByteIndexed() const {
2122     if (!isSingleSpacedVectorIndexed()) return false;
2123     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2124   }
2125 
2126   bool isVecListOneDHWordIndexed() const {
2127     if (!isSingleSpacedVectorIndexed()) return false;
2128     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2129   }
2130 
2131   bool isVecListOneDWordIndexed() const {
2132     if (!isSingleSpacedVectorIndexed()) return false;
2133     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2134   }
2135 
2136   bool isVecListTwoDByteIndexed() const {
2137     if (!isSingleSpacedVectorIndexed()) return false;
2138     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2139   }
2140 
2141   bool isVecListTwoDHWordIndexed() const {
2142     if (!isSingleSpacedVectorIndexed()) return false;
2143     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2144   }
2145 
2146   bool isVecListTwoQWordIndexed() const {
2147     if (!isDoubleSpacedVectorIndexed()) return false;
2148     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2149   }
2150 
2151   bool isVecListTwoQHWordIndexed() const {
2152     if (!isDoubleSpacedVectorIndexed()) return false;
2153     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2154   }
2155 
2156   bool isVecListTwoDWordIndexed() const {
2157     if (!isSingleSpacedVectorIndexed()) return false;
2158     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2159   }
2160 
2161   bool isVecListThreeDByteIndexed() const {
2162     if (!isSingleSpacedVectorIndexed()) return false;
2163     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2164   }
2165 
2166   bool isVecListThreeDHWordIndexed() const {
2167     if (!isSingleSpacedVectorIndexed()) return false;
2168     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2169   }
2170 
2171   bool isVecListThreeQWordIndexed() const {
2172     if (!isDoubleSpacedVectorIndexed()) return false;
2173     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2174   }
2175 
2176   bool isVecListThreeQHWordIndexed() const {
2177     if (!isDoubleSpacedVectorIndexed()) return false;
2178     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2179   }
2180 
2181   bool isVecListThreeDWordIndexed() const {
2182     if (!isSingleSpacedVectorIndexed()) return false;
2183     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2184   }
2185 
2186   bool isVecListFourDByteIndexed() const {
2187     if (!isSingleSpacedVectorIndexed()) return false;
2188     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2189   }
2190 
2191   bool isVecListFourDHWordIndexed() const {
2192     if (!isSingleSpacedVectorIndexed()) return false;
2193     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2194   }
2195 
2196   bool isVecListFourQWordIndexed() const {
2197     if (!isDoubleSpacedVectorIndexed()) return false;
2198     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2199   }
2200 
2201   bool isVecListFourQHWordIndexed() const {
2202     if (!isDoubleSpacedVectorIndexed()) return false;
2203     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2204   }
2205 
2206   bool isVecListFourDWordIndexed() const {
2207     if (!isSingleSpacedVectorIndexed()) return false;
2208     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2209   }
2210 
2211   bool isVectorIndex() const { return Kind == k_VectorIndex; }
2212 
2213   template <unsigned NumLanes>
2214   bool isVectorIndexInRange() const {
2215     if (Kind != k_VectorIndex) return false;
2216     return VectorIndex.Val < NumLanes;
2217   }
2218 
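       // Note: the lane counts below (8, 4, 2, 1) are the number of 8-, 16-,
       // 32- and 64-bit elements in a 64-bit vector, e.g. a NEON D register.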
2219   bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
2220   bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
2221   bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
2222   bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }
2223 
2224   template<int PermittedValue, int OtherPermittedValue>
2225   bool isMVEPairVectorIndex() const {
2226     if (Kind != k_VectorIndex) return false;
2227     return VectorIndex.Val == PermittedValue ||
2228            VectorIndex.Val == OtherPermittedValue;
2229   }
2230 
2231   bool isNEONi8splat() const {
2232     if (!isImm()) return false;
2233     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2234     // Must be a constant.
2235     if (!CE) return false;
2236     int64_t Value = CE->getValue();
2237     // An i8 value splatted across 8 bytes. The immediate is just the 8-bit
2238     // value itself.
2239     return Value >= 0 && Value < 256;
2240   }
2241 
2242   bool isNEONi16splat() const {
2243     if (isNEONByteReplicate(2))
2244       return false; // Leave that to byte replication and forbid it by default.
2245     if (!isImm())
2246       return false;
2247     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2248     // Must be a constant.
2249     if (!CE) return false;
2250     unsigned Value = CE->getValue();
2251     return ARM_AM::isNEONi16splat(Value);
2252   }
2253 
2254   bool isNEONi16splatNot() const {
2255     if (!isImm())
2256       return false;
2257     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2258     // Must be a constant.
2259     if (!CE) return false;
2260     unsigned Value = CE->getValue();
2261     return ARM_AM::isNEONi16splat(~Value & 0xffff);
2262   }
2263 
2264   bool isNEONi32splat() const {
2265     if (isNEONByteReplicate(4))
2266       return false; // Leave that to byte replication and forbid it by default.
2267     if (!isImm())
2268       return false;
2269     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2270     // Must be a constant.
2271     if (!CE) return false;
2272     unsigned Value = CE->getValue();
2273     return ARM_AM::isNEONi32splat(Value);
2274   }
2275 
2276   bool isNEONi32splatNot() const {
2277     if (!isImm())
2278       return false;
2279     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2280     // Must be a constant.
2281     if (!CE) return false;
2282     unsigned Value = CE->getValue();
2283     return ARM_AM::isNEONi32splat(~Value);
2284   }
2285 
2286   static bool isValidNEONi32vmovImm(int64_t Value) {
2287     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2288     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
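         // For illustration, values such as 0x000000AB, 0x0000AB00, 0x00AB0000,
         // 0xAB000000, 0x0000ABFF and 0x00ABFFFF all satisfy the checks below.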
2289     return ((Value & 0xffffffffffffff00) == 0) ||
2290            ((Value & 0xffffffffffff00ff) == 0) ||
2291            ((Value & 0xffffffffff00ffff) == 0) ||
2292            ((Value & 0xffffffff00ffffff) == 0) ||
2293            ((Value & 0xffffffffffff00ff) == 0xff) ||
2294            ((Value & 0xffffffffff00ffff) == 0xffff);
2295   }
2296 
2297   bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
2298     assert((Width == 8 || Width == 16 || Width == 32) &&
2299            "Invalid element width");
2300     assert(NumElems * Width <= 64 && "Invalid result width");
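         // For illustration: isNEONReplicate(8, 4, false) accepts 0xABABABAB,
         // since every 8-bit element equals the lowest element (0xAB).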
2301 
2302     if (!isImm())
2303       return false;
2304     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2305     // Must be a constant.
2306     if (!CE)
2307       return false;
2308     int64_t Value = CE->getValue();
2309     if (!Value)
2310       return false; // Don't bother with zero.
2311     if (Inv)
2312       Value = ~Value;
2313 
2314     uint64_t Mask = (1ull << Width) - 1;
2315     uint64_t Elem = Value & Mask;
2316     if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2317       return false;
2318     if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2319       return false;
2320 
2321     for (unsigned i = 1; i < NumElems; ++i) {
2322       Value >>= Width;
2323       if ((Value & Mask) != Elem)
2324         return false;
2325     }
2326     return true;
2327   }
2328 
2329   bool isNEONByteReplicate(unsigned NumBytes) const {
2330     return isNEONReplicate(8, NumBytes, false);
2331   }
2332 
2333   static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2334     assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2335            "Invalid source width");
2336     assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2337            "Invalid destination width");
2338     assert(FromW < ToW && "FromW must be less than ToW");
2339   }
2340 
2341   template<unsigned FromW, unsigned ToW>
2342   bool isNEONmovReplicate() const {
2343     checkNeonReplicateArgs(FromW, ToW);
2344     if (ToW == 64 && isNEONi64splat())
2345       return false;
2346     return isNEONReplicate(FromW, ToW / FromW, false);
2347   }
2348 
2349   template<unsigned FromW, unsigned ToW>
2350   bool isNEONinvReplicate() const {
2351     checkNeonReplicateArgs(FromW, ToW);
2352     return isNEONReplicate(FromW, ToW / FromW, true);
2353   }
2354 
2355   bool isNEONi32vmov() const {
2356     if (isNEONByteReplicate(4))
2357       return false; // Let it be classified as a byte-replicate case.
2358     if (!isImm())
2359       return false;
2360     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2361     // Must be a constant.
2362     if (!CE)
2363       return false;
2364     return isValidNEONi32vmovImm(CE->getValue());
2365   }
2366 
2367   bool isNEONi32vmovNeg() const {
2368     if (!isImm()) return false;
2369     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2370     // Must be a constant.
2371     if (!CE) return false;
2372     return isValidNEONi32vmovImm(~CE->getValue());
2373   }
2374 
2375   bool isNEONi64splat() const {
2376     if (!isImm()) return false;
2377     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2378     // Must be a constant.
2379     if (!CE) return false;
2380     uint64_t Value = CE->getValue();
2381     // i64 value with each byte being either 0 or 0xff.
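         // For illustration: 0x00ff00ffff0000ff passes this check, while
         // 0x00ff00ffff0000fe does not (its low byte is neither 0 nor 0xff).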
2382     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2383       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2384     return true;
2385   }
2386 
2387   template<int64_t Angle, int64_t Remainder>
2388   bool isComplexRotation() const {
2389     if (!isImm()) return false;
2390 
2391     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2392     if (!CE) return false;
2393     uint64_t Value = CE->getValue();
2394 
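         // For illustration: isComplexRotation<90, 0> accepts 0, 90, 180 and
         // 270, while isComplexRotation<180, 90> accepts only 90 and 270.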
2395     return (Value % Angle == Remainder && Value <= 270);
2396   }
2397 
2398   bool isMVELongShift() const {
2399     if (!isImm()) return false;
2400     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2401     // Must be a constant.
2402     if (!CE) return false;
2403     uint64_t Value = CE->getValue();
2404     return Value >= 1 && Value <= 32;
2405   }
2406 
2407   bool isMveSaturateOp() const {
2408     if (!isImm()) return false;
2409     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2410     if (!CE) return false;
2411     uint64_t Value = CE->getValue();
2412     return Value == 48 || Value == 64;
2413   }
2414 
2415   bool isITCondCodeNoAL() const {
2416     if (!isITCondCode()) return false;
2417     ARMCC::CondCodes CC = getCondCode();
2418     return CC != ARMCC::AL;
2419   }
2420 
2421   bool isITCondCodeRestrictedI() const {
2422     if (!isITCondCode())
2423       return false;
2424     ARMCC::CondCodes CC = getCondCode();
2425     return CC == ARMCC::EQ || CC == ARMCC::NE;
2426   }
2427 
2428   bool isITCondCodeRestrictedS() const {
2429     if (!isITCondCode())
2430       return false;
2431     ARMCC::CondCodes CC = getCondCode();
2432     return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2433            CC == ARMCC::GE;
2434   }
2435 
2436   bool isITCondCodeRestrictedU() const {
2437     if (!isITCondCode())
2438       return false;
2439     ARMCC::CondCodes CC = getCondCode();
2440     return CC == ARMCC::HS || CC == ARMCC::HI;
2441   }
2442 
2443   bool isITCondCodeRestrictedFP() const {
2444     if (!isITCondCode())
2445       return false;
2446     ARMCC::CondCodes CC = getCondCode();
2447     return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2448            CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2449   }
2450 
2451   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2452     // Add as immediates when possible.  Null MCExpr = 0.
2453     if (!Expr)
2454       Inst.addOperand(MCOperand::createImm(0));
2455     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2456       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2457     else
2458       Inst.addOperand(MCOperand::createExpr(Expr));
2459   }
2460 
2461   void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2462     assert(N == 1 && "Invalid number of operands!");
2463     addExpr(Inst, getImm());
2464   }
2465 
2466   void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2467     assert(N == 1 && "Invalid number of operands!");
2468     addExpr(Inst, getImm());
2469   }
2470 
2471   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2472     assert(N == 2 && "Invalid number of operands!");
2473     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2474     unsigned RegNum = getCondCode() == ARMCC::AL ? 0 : ARM::CPSR;
2475     Inst.addOperand(MCOperand::createReg(RegNum));
2476   }
2477 
2478   void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2479     assert(N == 3 && "Invalid number of operands!");
2480     Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2481     unsigned RegNum = getVPTPred() == ARMVCC::None ? 0 : ARM::P0;
2482     Inst.addOperand(MCOperand::createReg(RegNum));
2483     Inst.addOperand(MCOperand::createReg(0));
2484   }
2485 
2486   void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2487     assert(N == 4 && "Invalid number of operands!");
2488     addVPTPredNOperands(Inst, N-1);
2489     unsigned RegNum;
2490     if (getVPTPred() == ARMVCC::None) {
2491       RegNum = 0;
2492     } else {
2493       unsigned NextOpIndex = Inst.getNumOperands();
2494       const MCInstrDesc &MCID =
2495           ARMDescs.Insts[ARM::INSTRUCTION_LIST_END - 1 - Inst.getOpcode()];
2496       int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2497       assert(TiedOp >= 0 &&
2498              "Inactive register in vpred_r is not tied to an output!");
2499       RegNum = Inst.getOperand(TiedOp).getReg();
2500     }
2501     Inst.addOperand(MCOperand::createReg(RegNum));
2502   }
2503 
2504   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2505     assert(N == 1 && "Invalid number of operands!");
2506     Inst.addOperand(MCOperand::createImm(getCoproc()));
2507   }
2508 
2509   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2510     assert(N == 1 && "Invalid number of operands!");
2511     Inst.addOperand(MCOperand::createImm(getCoproc()));
2512   }
2513 
2514   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2515     assert(N == 1 && "Invalid number of operands!");
2516     Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2517   }
2518 
2519   void addITMaskOperands(MCInst &Inst, unsigned N) const {
2520     assert(N == 1 && "Invalid number of operands!");
2521     Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2522   }
2523 
2524   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2525     assert(N == 1 && "Invalid number of operands!");
2526     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2527   }
2528 
2529   void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2530     assert(N == 1 && "Invalid number of operands!");
2531     Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2532   }
2533 
2534   void addCCOutOperands(MCInst &Inst, unsigned N) const {
2535     assert(N == 1 && "Invalid number of operands!");
2536     Inst.addOperand(MCOperand::createReg(getReg()));
2537   }
2538 
2539   void addRegOperands(MCInst &Inst, unsigned N) const {
2540     assert(N == 1 && "Invalid number of operands!");
2541     Inst.addOperand(MCOperand::createReg(getReg()));
2542   }
2543 
2544   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2545     assert(N == 3 && "Invalid number of operands!");
2546     assert(isRegShiftedReg() &&
2547            "addRegShiftedRegOperands() on non-RegShiftedReg!");
2548     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2549     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2550     Inst.addOperand(MCOperand::createImm(
2551       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2552   }
2553 
2554   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2555     assert(N == 2 && "Invalid number of operands!");
2556     assert(isRegShiftedImm() &&
2557            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2558     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2559     // Shift of #32 is encoded as 0 where permitted
2560     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2561     Inst.addOperand(MCOperand::createImm(
2562       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2563   }
2564 
2565   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2566     assert(N == 1 && "Invalid number of operands!");
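         // For illustration: an "asr #4" shifter immediate is emitted as
         // (1 << 5) | 4 == 36, while "lsl #4" is emitted as plain 4.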
2567     Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2568                                          ShifterImm.Imm));
2569   }
2570 
2571   void addRegListOperands(MCInst &Inst, unsigned N) const {
2572     assert(N == 1 && "Invalid number of operands!");
2573     const SmallVectorImpl<unsigned> &RegList = getRegList();
2574     for (unsigned Reg : RegList)
2575       Inst.addOperand(MCOperand::createReg(Reg));
2576   }
2577 
2578   void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2579     assert(N == 1 && "Invalid number of operands!");
2580     const SmallVectorImpl<unsigned> &RegList = getRegList();
2581     for (unsigned Reg : RegList)
2582       Inst.addOperand(MCOperand::createReg(Reg));
2583   }
2584 
2585   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2586     addRegListOperands(Inst, N);
2587   }
2588 
2589   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2590     addRegListOperands(Inst, N);
2591   }
2592 
2593   void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2594     addRegListOperands(Inst, N);
2595   }
2596 
2597   void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2598     addRegListOperands(Inst, N);
2599   }
2600 
2601   void addRotImmOperands(MCInst &Inst, unsigned N) const {
2602     assert(N == 1 && "Invalid number of operands!");
2603     // Encoded as val>>3. The printer handles display as 8, 16, 24.
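         // For illustration: rotations of 0, 8, 16 and 24 are emitted as
         // 0, 1, 2 and 3 respectively.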
2604     Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2605   }
2606 
2607   void addModImmOperands(MCInst &Inst, unsigned N) const {
2608     assert(N == 1 && "Invalid number of operands!");
2609 
2610     // Support for fixups (MCFixup)
2611     if (isImm())
2612       return addImmOperands(Inst, N);
2613 
2614     Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2615   }
2616 
2617   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2618     assert(N == 1 && "Invalid number of operands!");
2619     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2620     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2621     Inst.addOperand(MCOperand::createImm(Enc));
2622   }
2623 
2624   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2625     assert(N == 1 && "Invalid number of operands!");
2626     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2627     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2628     Inst.addOperand(MCOperand::createImm(Enc));
2629   }
2630 
2631   void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2632     assert(N == 1 && "Invalid number of operands!");
2633     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2634     uint32_t Val = -CE->getValue();
2635     Inst.addOperand(MCOperand::createImm(Val));
2636   }
2637 
2638   void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2639     assert(N == 1 && "Invalid number of operands!");
2640     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2641     uint32_t Val = -CE->getValue();
2642     Inst.addOperand(MCOperand::createImm(Val));
2643   }
2644 
2645   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2646     assert(N == 1 && "Invalid number of operands!");
2647     // Munge the lsb/width into a bitfield mask.
2648     unsigned lsb = Bitfield.LSB;
2649     unsigned width = Bitfield.Width;
2650     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2651     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2652                       (32 - (lsb + width)));
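         // For illustration: lsb == 8, width == 8 gives
         //   ~(((0xffffffff >> 8) << 24) >> 16) == ~0x0000ff00 == 0xffff00ff,
         // i.e. only bits [15:8] of the mask are clear.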
2653     Inst.addOperand(MCOperand::createImm(Mask));
2654   }
2655 
2656   void addImmOperands(MCInst &Inst, unsigned N) const {
2657     assert(N == 1 && "Invalid number of operands!");
2658     addExpr(Inst, getImm());
2659   }
2660 
2661   void addFBits16Operands(MCInst &Inst, unsigned N) const {
2662     assert(N == 1 && "Invalid number of operands!");
2663     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2664     Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2665   }
2666 
2667   void addFBits32Operands(MCInst &Inst, unsigned N) const {
2668     assert(N == 1 && "Invalid number of operands!");
2669     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2670     Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2671   }
2672 
2673   void addFPImmOperands(MCInst &Inst, unsigned N) const {
2674     assert(N == 1 && "Invalid number of operands!");
2675     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2676     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2677     Inst.addOperand(MCOperand::createImm(Val));
2678   }
2679 
2680   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2681     assert(N == 1 && "Invalid number of operands!");
2682     // FIXME: We really want to scale the value here, but the LDRD/STRD
2683     // instructions don't encode operands that way yet.
2684     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2685     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2686   }
2687 
2688   void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2689     assert(N == 1 && "Invalid number of operands!");
2690     // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2691     // instructions don't encode operands that way yet.
2692     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2693     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2694   }
2695 
2696   void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
2697     assert(N == 1 && "Invalid number of operands!");
2698     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2699     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2700   }
2701 
2702   void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
2703     assert(N == 1 && "Invalid number of operands!");
2704     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2705     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2706   }
2707 
2708   void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
2709     assert(N == 1 && "Invalid number of operands!");
2710     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2711     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2712   }
2713 
2714   void addImm7Operands(MCInst &Inst, unsigned N) const {
2715     assert(N == 1 && "Invalid number of operands!");
2716     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2717     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2718   }
2719 
2720   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2721     assert(N == 1 && "Invalid number of operands!");
2722     // The immediate is scaled by four in the encoding and is stored
2723     // in the MCInst as such. Lop off the low two bits here.
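         // For illustration: an assembly immediate of #16 is stored as 4 here.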
2724     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2725     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2726   }
2727 
2728   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2729     assert(N == 1 && "Invalid number of operands!");
2730     // The immediate is scaled by four in the encoding and is stored
2731     // in the MCInst as such. Lop off the low two bits here.
2732     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2733     Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2734   }
2735 
2736   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2737     assert(N == 1 && "Invalid number of operands!");
2738     // The immediate is scaled by four in the encoding and is stored
2739     // in the MCInst as such. Lop off the low two bits here.
2740     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2741     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2742   }
2743 
2744   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2745     assert(N == 1 && "Invalid number of operands!");
2746     // The constant encodes as the immediate-1, and we store in the instruction
2747     // the bits as encoded, so subtract off one here.
2748     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2749     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2750   }
2751 
2752   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2753     assert(N == 1 && "Invalid number of operands!");
2754     // The constant encodes as the immediate-1, and we store in the instruction
2755     // the bits as encoded, so subtract off one here.
2756     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2757     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2758   }
2759 
2760   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2761     assert(N == 1 && "Invalid number of operands!");
2762     // The constant encodes as the immediate, except for 32, which encodes as
2763     // zero.
2764     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2765     unsigned Imm = CE->getValue();
2766     Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2767   }
2768 
2769   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2770     assert(N == 1 && "Invalid number of operands!");
2771     // An ASR value of 32 encodes as 0, so that's how we want to add it to
2772     // the instruction as well.
2773     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2774     int Val = CE->getValue();
2775     Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2776   }
2777 
2778   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2779     assert(N == 1 && "Invalid number of operands!");
2780     // The operand is actually a t2_so_imm, but we have its bitwise
2781     // negation in the assembly source, so twiddle it here.
2782     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2783     Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2784   }
2785 
2786   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2787     assert(N == 1 && "Invalid number of operands!");
2788     // The operand is actually a t2_so_imm, but we have its
2789     // negation in the assembly source, so twiddle it here.
2790     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2791     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2792   }
2793 
2794   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2795     assert(N == 1 && "Invalid number of operands!");
2796     // The operand is actually an imm0_4095, but we have its
2797     // negation in the assembly source, so twiddle it here.
2798     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2799     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2800   }
2801 
2802   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2803     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2804       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2805       return;
2806     }
2807     const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2808     Inst.addOperand(MCOperand::createExpr(SR));
2809   }
2810 
2811   void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2812     assert(N == 1 && "Invalid number of operands!");
2813     if (isImm()) {
2814       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2815       if (CE) {
2816         Inst.addOperand(MCOperand::createImm(CE->getValue()));
2817         return;
2818       }
2819       const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2820       Inst.addOperand(MCOperand::createExpr(SR));
2821       return;
2822     }
2823 
2824     assert(isGPRMem() && "Unknown value type!");
2825     assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2826     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2827       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2828     else
2829       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2830   }
2831 
2832   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2833     assert(N == 1 && "Invalid number of operands!");
2834     Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2835   }
2836 
2837   void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2838     assert(N == 1 && "Invalid number of operands!");
2839     Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2840   }
2841 
2842   void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2843     assert(N == 1 && "Invalid number of operands!");
2844     Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2845   }
2846 
2847   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2848     assert(N == 1 && "Invalid number of operands!");
2849     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2850   }
2851 
2852   void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2853     assert(N == 1 && "Invalid number of operands!");
2854     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2855   }
2856 
2857   void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
2858     assert(N == 1 && "Invalid number of operands!");
2859     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2860   }
2861 
2862   void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
2863     assert(N == 1 && "Invalid number of operands!");
2864     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2865   }
2866 
2867   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2868     assert(N == 1 && "Invalid number of operands!");
2869     if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2870       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2871     else
2872       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2873   }
2874 
2875   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2876     assert(N == 1 && "Invalid number of operands!");
2877     assert(isImm() && "Not an immediate!");
2878 
2879     // If we have an immediate that's not a constant, treat it as a label
2880     // reference needing a fixup.
2881     if (!isa<MCConstantExpr>(getImm())) {
2882       Inst.addOperand(MCOperand::createExpr(getImm()));
2883       return;
2884     }
2885 
2886     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2887     int Val = CE->getValue();
2888     Inst.addOperand(MCOperand::createImm(Val));
2889   }
2890 
2891   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2892     assert(N == 2 && "Invalid number of operands!");
2893     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2894     Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2895   }
2896 
2897   void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2898     addAlignedMemoryOperands(Inst, N);
2899   }
2900 
2901   void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2902     addAlignedMemoryOperands(Inst, N);
2903   }
2904 
2905   void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2906     addAlignedMemoryOperands(Inst, N);
2907   }
2908 
2909   void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2910     addAlignedMemoryOperands(Inst, N);
2911   }
2912 
2913   void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2914     addAlignedMemoryOperands(Inst, N);
2915   }
2916 
2917   void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2918     addAlignedMemoryOperands(Inst, N);
2919   }
2920 
2921   void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2922     addAlignedMemoryOperands(Inst, N);
2923   }
2924 
2925   void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2926     addAlignedMemoryOperands(Inst, N);
2927   }
2928 
2929   void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2930     addAlignedMemoryOperands(Inst, N);
2931   }
2932 
2933   void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2934     addAlignedMemoryOperands(Inst, N);
2935   }
2936 
2937   void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2938     addAlignedMemoryOperands(Inst, N);
2939   }
2940 
2941   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2942     assert(N == 3 && "Invalid number of operands!");
2943     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2944     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2945     if (!Memory.OffsetRegNum) {
2946       if (!Memory.OffsetImm)
2947         Inst.addOperand(MCOperand::createImm(0));
2948       else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
2949         int32_t Val = CE->getValue();
2950         ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2951         // Special case for #-0
2952         if (Val == std::numeric_limits<int32_t>::min())
2953           Val = 0;
2954         if (Val < 0)
2955           Val = -Val;
2956         Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2957         Inst.addOperand(MCOperand::createImm(Val));
2958       } else
2959         Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2960     } else {
2961       // For register offset, we encode the shift type and negation flag
2962       // here.
2963       int32_t Val =
2964           ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2965                             Memory.ShiftImm, Memory.ShiftType);
2966       Inst.addOperand(MCOperand::createImm(Val));
2967     }
2968   }
2969 
2970   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2971     assert(N == 2 && "Invalid number of operands!");
2972     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2973     assert(CE && "non-constant AM2OffsetImm operand!");
2974     int32_t Val = CE->getValue();
2975     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2976     // Special case for #-0
2977     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2978     if (Val < 0) Val = -Val;
2979     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2980     Inst.addOperand(MCOperand::createReg(0));
2981     Inst.addOperand(MCOperand::createImm(Val));
2982   }
2983 
2984   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2985     assert(N == 3 && "Invalid number of operands!");
2986     // If we have an immediate that's not a constant, treat it as a label
2987     // reference needing a fixup. If it is a constant, it's something else
2988     // and we reject it.
2989     if (isImm()) {
2990       Inst.addOperand(MCOperand::createExpr(getImm()));
2991       Inst.addOperand(MCOperand::createReg(0));
2992       Inst.addOperand(MCOperand::createImm(0));
2993       return;
2994     }
2995 
2996     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2997     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2998     if (!Memory.OffsetRegNum) {
2999       if (!Memory.OffsetImm)
3000         Inst.addOperand(MCOperand::createImm(0));
3001       else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3002         int32_t Val = CE->getValue();
3003         ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3004         // Special case for #-0
3005         if (Val == std::numeric_limits<int32_t>::min())
3006           Val = 0;
3007         if (Val < 0)
3008           Val = -Val;
3009         Val = ARM_AM::getAM3Opc(AddSub, Val);
3010         Inst.addOperand(MCOperand::createImm(Val));
3011       } else
3012         Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3013     } else {
3014       // For register offset, we only encode the add/sub (negation) flag
3015       // here; addrmode3 has no shifted register offset.
3016       int32_t Val =
3017           ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
3018       Inst.addOperand(MCOperand::createImm(Val));
3019     }
3020   }
3021 
3022   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3023     assert(N == 2 && "Invalid number of operands!");
3024     if (Kind == k_PostIndexRegister) {
3025       int32_t Val =
3026         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3027       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3028       Inst.addOperand(MCOperand::createImm(Val));
3029       return;
3030     }
3031 
3032     // Constant offset.
3033     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3034     int32_t Val = CE->getValue();
3035     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3036     // Special case for #-0
3037     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3038     if (Val < 0) Val = -Val;
3039     Val = ARM_AM::getAM3Opc(AddSub, Val);
3040     Inst.addOperand(MCOperand::createReg(0));
3041     Inst.addOperand(MCOperand::createImm(Val));
3042   }
3043 
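  // Note: addrmode5 offsets are in words (the low two bits are implied zero),
  // so, for example, a constant offset of #-8 below is emitted as
  // getAM5Opc(ARM_AM::sub, 2).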
3044   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3045     assert(N == 2 && "Invalid number of operands!");
3046     // If we have an immediate that's not a constant, treat it as a label
3047     // reference needing a fixup. If it is a constant, it's something else
3048     // and we reject it.
3049     if (isImm()) {
3050       Inst.addOperand(MCOperand::createExpr(getImm()));
3051       Inst.addOperand(MCOperand::createImm(0));
3052       return;
3053     }
3054 
3055     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3056     if (!Memory.OffsetImm)
3057       Inst.addOperand(MCOperand::createImm(0));
3058     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3059       // The lower two bits are always zero and as such are not encoded.
3060       int32_t Val = CE->getValue() / 4;
3061       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3062       // Special case for #-0
3063       if (Val == std::numeric_limits<int32_t>::min())
3064         Val = 0;
3065       if (Val < 0)
3066         Val = -Val;
3067       Val = ARM_AM::getAM5Opc(AddSub, Val);
3068       Inst.addOperand(MCOperand::createImm(Val));
3069     } else
3070       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3071   }
3072 
3073   void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3074     assert(N == 2 && "Invalid number of operands!");
3075     // If we have an immediate that's not a constant, treat it as a label
3076     // reference needing a fixup. If it is a constant, it's something else
3077     // and we reject it.
3078     if (isImm()) {
3079       Inst.addOperand(MCOperand::createExpr(getImm()));
3080       Inst.addOperand(MCOperand::createImm(0));
3081       return;
3082     }
3083 
3084     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3085     // The lower bit is always zero and as such is not encoded.
3086     if (!Memory.OffsetImm)
3087       Inst.addOperand(MCOperand::createImm(0));
3088     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3089       int32_t Val = CE->getValue() / 2;
3090       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
3091       // Special case for #-0
3092       if (Val == std::numeric_limits<int32_t>::min())
3093         Val = 0;
3094       if (Val < 0)
3095         Val = -Val;
3096       Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3097       Inst.addOperand(MCOperand::createImm(Val));
3098     } else
3099       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3100   }
3101 
3102   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3103     assert(N == 2 && "Invalid number of operands!");
3104     // If we have an immediate that's not a constant, treat it as a label
3105     // reference needing a fixup. If it is a constant, it's something else
3106     // and we reject it.
3107     if (isImm()) {
3108       Inst.addOperand(MCOperand::createExpr(getImm()));
3109       Inst.addOperand(MCOperand::createImm(0));
3110       return;
3111     }
3112 
3113     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3114     addExpr(Inst, Memory.OffsetImm);
3115   }
3116 
3117   void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3118     assert(N == 2 && "Invalid number of operands!");
3119     // If we have an immediate that's not a constant, treat it as a label
3120     // reference needing a fixup. If it is a constant, it's something else
3121     // and we reject it.
3122     if (isImm()) {
3123       Inst.addOperand(MCOperand::createExpr(getImm()));
3124       Inst.addOperand(MCOperand::createImm(0));
3125       return;
3126     }
3127 
3128     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3129     addExpr(Inst, Memory.OffsetImm);
3130   }
3131 
3132   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3133     assert(N == 2 && "Invalid number of operands!");
3134     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3135     if (!Memory.OffsetImm)
3136       Inst.addOperand(MCOperand::createImm(0));
3137     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3138       // The lower two bits are always zero and as such are not encoded.
3139       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3140     else
3141       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3142   }
3143 
3144   void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3145     assert(N == 2 && "Invalid number of operands!");
3146     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3147     addExpr(Inst, Memory.OffsetImm);
3148   }
3149 
3150   void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3151     assert(N == 2 && "Invalid number of operands!");
3152     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3153     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3154   }
3155 
3156   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3157     assert(N == 2 && "Invalid number of operands!");
3158     // If this is an immediate, it's a label reference.
3159     if (isImm()) {
3160       addExpr(Inst, getImm());
3161       Inst.addOperand(MCOperand::createImm(0));
3162       return;
3163     }
3164 
3165     // Otherwise, it's a normal memory reg+offset.
3166     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3167     addExpr(Inst, Memory.OffsetImm);
3168   }
3169 
3170   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3171     assert(N == 2 && "Invalid number of operands!");
3172     // If this is an immediate, it's a label reference.
3173     if (isImm()) {
3174       addExpr(Inst, getImm());
3175       Inst.addOperand(MCOperand::createImm(0));
3176       return;
3177     }
3178 
3179     // Otherwise, it's a normal memory reg+offset.
3180     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3181     addExpr(Inst, Memory.OffsetImm);
3182   }
3183 
3184   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3185     assert(N == 1 && "Invalid number of operands!");
3186     // This is a container for the immediate that we will create the
3187     // constant pool from.
3188     addExpr(Inst, getConstantPoolImm());
3189   }
3190 
3191   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3192     assert(N == 2 && "Invalid number of operands!");
3193     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3194     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3195   }
3196 
3197   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3198     assert(N == 2 && "Invalid number of operands!");
3199     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3200     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3201   }
3202 
3203   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3204     assert(N == 3 && "Invalid number of operands!");
3205     unsigned Val =
3206       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
3207                         Memory.ShiftImm, Memory.ShiftType);
3208     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3209     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3210     Inst.addOperand(MCOperand::createImm(Val));
3211   }
3212 
3213   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3214     assert(N == 3 && "Invalid number of operands!");
3215     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3216     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3217     Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3218   }
3219 
3220   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3221     assert(N == 2 && "Invalid number of operands!");
3222     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3223     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3224   }
3225 
3226   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3227     assert(N == 2 && "Invalid number of operands!");
3228     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3229     if (!Memory.OffsetImm)
3230       Inst.addOperand(MCOperand::createImm(0));
3231     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3232       // The lower two bits are always zero and as such are not encoded.
3233       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3234     else
3235       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3236   }
3237 
3238   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3239     assert(N == 2 && "Invalid number of operands!");
3240     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3241     if (!Memory.OffsetImm)
3242       Inst.addOperand(MCOperand::createImm(0));
3243     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3244       Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3245     else
3246       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3247   }
3248 
3249   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3250     assert(N == 2 && "Invalid number of operands!");
3251     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3252     addExpr(Inst, Memory.OffsetImm);
3253   }
3254 
3255   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3256     assert(N == 2 && "Invalid number of operands!");
3257     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3258     if (!Memory.OffsetImm)
3259       Inst.addOperand(MCOperand::createImm(0));
3260     else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3261       // The lower two bits are always zero and as such are not encoded.
3262       Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3263     else
3264       Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3265   }
3266 
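  // The post-indexed immediate forms below encode the magnitude in the low
  // bits and the add/sub direction in bit 8; the s4 variant also scales the
  // magnitude down by 4. For example, addPostIdxImm8Operands turns "#16" into
  // 16 | (1 << 8) = 0x110 and "#-16" into plain 16.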
3267   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3268     assert(N == 1 && "Invalid number of operands!");
3269     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3270     assert(CE && "non-constant post-idx-imm8 operand!");
3271     int Imm = CE->getValue();
3272     bool isAdd = Imm >= 0;
3273     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3274     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3275     Inst.addOperand(MCOperand::createImm(Imm));
3276   }
3277 
3278   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3279     assert(N == 1 && "Invalid number of operands!");
3280     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3281     assert(CE && "non-constant post-idx-imm8s4 operand!");
3282     int Imm = CE->getValue();
3283     bool isAdd = Imm >= 0;
3284     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3285     // Immediate is scaled by 4.
3286     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3287     Inst.addOperand(MCOperand::createImm(Imm));
3288   }
3289 
3290   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3291     assert(N == 2 && "Invalid number of operands!");
3292     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3293     Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3294   }
3295 
3296   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3297     assert(N == 2 && "Invalid number of operands!");
3298     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3299     // The sign, shift type, and shift amount are encoded in a single operand
3300     // using the AM2 encoding helpers.
3301     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3302     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3303                                      PostIdxReg.ShiftTy);
3304     Inst.addOperand(MCOperand::createImm(Imm));
3305   }
3306 
3307   void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3308     assert(N == 1 && "Invalid number of operands!");
3309     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3310     Inst.addOperand(MCOperand::createImm(CE->getValue()));
3311   }
3312 
3313   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3314     assert(N == 1 && "Invalid number of operands!");
3315     Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3316   }
3317 
3318   void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3319     assert(N == 1 && "Invalid number of operands!");
3320     Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3321   }
3322 
3323   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3324     assert(N == 1 && "Invalid number of operands!");
3325     Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3326   }
3327 
3328   void addVecListOperands(MCInst &Inst, unsigned N) const {
3329     assert(N == 1 && "Invalid number of operands!");
3330     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3331   }
3332 
3333   void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3334     assert(N == 1 && "Invalid number of operands!");
3335 
3336     // When we come here, the VectorList field will identify a range
3337     // of q-registers by its base register and length, and it will
3338     // have already been error-checked to be the expected length of
3339     // range and contain only q-regs in the range q0-q7. So we can
3340     // count on the base register being in the range q0-q6 (for 2
3341     // regs) or q0-q4 (for 4 regs).
3342     //
3343     // The MVE instructions taking a register range of this kind will
3344     // need an operand in the MQQPR or MQQQQPR class, representing the
3345     // entire range as a unit. So we must translate into that class,
3346     // by finding the index of the base register in the MQPR reg
3347     // class, and returning the super-register at the corresponding
3348     // index in the target class.
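    //
    // For example, assuming both classes are ordered by base register, a
    // 2-register list starting at q2 sits at index 2 in MQPR, so we emit
    // the MQQPR register at index 2, i.e. the pair covering q2 and q3.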
3349 
3350     const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3351     const MCRegisterClass *RC_out =
3352         (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3353                                 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3354 
3355     unsigned I, E = RC_out->getNumRegs();
3356     for (I = 0; I < E; I++)
3357       if (RC_in->getRegister(I) == VectorList.RegNum)
3358         break;
3359     assert(I < E && "Invalid vector list start register!");
3360 
3361     Inst.addOperand(MCOperand::createReg(RC_out->getRegister(I)));
3362   }
3363 
3364   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3365     assert(N == 2 && "Invalid number of operands!");
3366     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3367     Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3368   }
3369 
3370   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3371     assert(N == 1 && "Invalid number of operands!");
3372     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3373   }
3374 
3375   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3376     assert(N == 1 && "Invalid number of operands!");
3377     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3378   }
3379 
3380   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3381     assert(N == 1 && "Invalid number of operands!");
3382     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3383   }
3384 
3385   void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3386     assert(N == 1 && "Invalid number of operands!");
3387     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3388   }
3389 
3390   void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3391     assert(N == 1 && "Invalid number of operands!");
3392     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3393   }
3394 
3395   void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3396     assert(N == 1 && "Invalid number of operands!");
3397     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3398   }
3399 
3400   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3401     assert(N == 1 && "Invalid number of operands!");
3402     // The immediate encodes the type of constant as well as the value.
3403     // Mask in that this is an i8 splat.
3404     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3405     Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3406   }
3407 
3408   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3409     assert(N == 1 && "Invalid number of operands!");
3410     // The immediate encodes the type of constant as well as the value.
3411     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3412     unsigned Value = CE->getValue();
3413     Value = ARM_AM::encodeNEONi16splat(Value);
3414     Inst.addOperand(MCOperand::createImm(Value));
3415   }
3416 
3417   void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3418     assert(N == 1 && "Invalid number of operands!");
3419     // The immediate encodes the type of constant as well as the value.
3420     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3421     unsigned Value = CE->getValue();
3422     Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
3423     Inst.addOperand(MCOperand::createImm(Value));
3424   }
3425 
3426   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3427     assert(N == 1 && "Invalid number of operands!");
3428     // The immediate encodes the type of constant as well as the value.
3429     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3430     unsigned Value = CE->getValue();
3431     Value = ARM_AM::encodeNEONi32splat(Value);
3432     Inst.addOperand(MCOperand::createImm(Value));
3433   }
3434 
3435   void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3436     assert(N == 1 && "Invalid number of operands!");
3437     // The immediate encodes the type of constant as well as the value.
3438     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3439     unsigned Value = CE->getValue();
3440     Value = ARM_AM::encodeNEONi32splat(~Value);
3441     Inst.addOperand(MCOperand::createImm(Value));
3442   }
3443 
3444   void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3445     // The immediate encodes the type of constant as well as the value.
3446     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3447     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3448             Inst.getOpcode() == ARM::VMOVv16i8) &&
3449           "All instructions that want to replicate a non-zero byte "
3450           "must be replaced with VMOVv8i8 or VMOVv16i8.");
3451     unsigned Value = CE->getValue();
3452     if (Inv)
3453       Value = ~Value;
3454     unsigned B = Value & 0xff;
3455     B |= 0xe00; // cmode = 0b1110
3456     Inst.addOperand(MCOperand::createImm(B));
3457   }
3458 
3459   void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3460     assert(N == 1 && "Invalid number of operands!");
3461     addNEONi8ReplicateOperands(Inst, true);
3462   }
3463 
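  // Helper for the i32 "vmov"-style immediates below: it packs the data byte
  // together with a marker that selects the appropriate NEON cmode for the
  // byte's position. For example, 0x4500 encodes as 0x45 | 0x200 = 0x245 and
  // 0x45ff encodes as 0x45 | 0xc00 = 0xc45.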
3464   static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3465     if (Value >= 256 && Value <= 0xffff)
3466       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3467     else if (Value > 0xffff && Value <= 0xffffff)
3468       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3469     else if (Value > 0xffffff)
3470       Value = (Value >> 24) | 0x600;
3471     return Value;
3472   }
3473 
3474   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3475     assert(N == 1 && "Invalid number of operands!");
3476     // The immediate encodes the type of constant as well as the value.
3477     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3478     unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3479     Inst.addOperand(MCOperand::createImm(Value));
3480   }
3481 
3482   void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3483     assert(N == 1 && "Invalid number of operands!");
3484     addNEONi8ReplicateOperands(Inst, false);
3485   }
3486 
3487   void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3488     assert(N == 1 && "Invalid number of operands!");
3489     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3490     assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3491             Inst.getOpcode() == ARM::VMOVv8i16 ||
3492             Inst.getOpcode() == ARM::VMVNv4i16 ||
3493             Inst.getOpcode() == ARM::VMVNv8i16) &&
3494           "All instructions that want to replicate a non-zero half-word "
3495           "must be replaced with V{MOV,MVN}v{4,8}i16.");
3496     uint64_t Value = CE->getValue();
3497     unsigned Elem = Value & 0xffff;
3498     if (Elem >= 256)
3499       Elem = (Elem >> 8) | 0x200;
3500     Inst.addOperand(MCOperand::createImm(Elem));
3501   }
3502 
3503   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3504     assert(N == 1 && "Invalid number of operands!");
3505     // The immediate encodes the type of constant as well as the value.
3506     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3507     unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3508     Inst.addOperand(MCOperand::createImm(Value));
3509   }
3510 
3511   void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3512     assert(N == 1 && "Invalid number of operands!");
3513     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3514     assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3515             Inst.getOpcode() == ARM::VMOVv4i32 ||
3516             Inst.getOpcode() == ARM::VMVNv2i32 ||
3517             Inst.getOpcode() == ARM::VMVNv4i32) &&
3518           "All instructions that want to replicate a non-zero word "
3519           "must be replaced with V{MOV,MVN}v{2,4}i32.");
3520     uint64_t Value = CE->getValue();
3521     unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3522     Inst.addOperand(MCOperand::createImm(Elem));
3523   }
3524 
3525   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3526     assert(N == 1 && "Invalid number of operands!");
3527     // The immediate encodes the type of constant as well as the value.
3528     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3529     uint64_t Value = CE->getValue();
3530     unsigned Imm = 0;
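    // Each byte of a valid i64 splat immediate is either 0x00 or 0xff, so
    // collapse it to one bit per byte. E.g. 0x00ff00ff00ff00ff -> 0b01010101.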
3531     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3532       Imm |= (Value & 1) << i;
3533     }
3534     Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3535   }
3536 
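  // Complex rotation operands: the "even" form accepts #0/#90/#180/#270 and
  // encodes them as 0-3, while the "odd" form accepts #90/#270 and encodes
  // them as 0-1.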
3537   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3538     assert(N == 1 && "Invalid number of operands!");
3539     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3540     Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3541   }
3542 
3543   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3544     assert(N == 1 && "Invalid number of operands!");
3545     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3546     Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3547   }
3548 
3549   void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3550     assert(N == 1 && "Invalid number of operands!");
3551     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3552     unsigned Imm = CE->getValue();
3553     assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3554     Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3555   }
3556 
3557   void print(raw_ostream &OS) const override;
3558 
3559   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3560     auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3561     Op->ITMask.Mask = Mask;
3562     Op->StartLoc = S;
3563     Op->EndLoc = S;
3564     return Op;
3565   }
3566 
3567   static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3568                                                     SMLoc S) {
3569     auto Op = std::make_unique<ARMOperand>(k_CondCode);
3570     Op->CC.Val = CC;
3571     Op->StartLoc = S;
3572     Op->EndLoc = S;
3573     return Op;
3574   }
3575 
3576   static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3577                                                    SMLoc S) {
3578     auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3579     Op->VCC.Val = CC;
3580     Op->StartLoc = S;
3581     Op->EndLoc = S;
3582     return Op;
3583   }
3584 
3585   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3586     auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3587     Op->Cop.Val = CopVal;
3588     Op->StartLoc = S;
3589     Op->EndLoc = S;
3590     return Op;
3591   }
3592 
3593   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3594     auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3595     Op->Cop.Val = CopVal;
3596     Op->StartLoc = S;
3597     Op->EndLoc = S;
3598     return Op;
3599   }
3600 
3601   static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3602                                                         SMLoc E) {
3603     auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3604     Op->Cop.Val = Val;
3605     Op->StartLoc = S;
3606     Op->EndLoc = E;
3607     return Op;
3608   }
3609 
3610   static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3611     auto Op = std::make_unique<ARMOperand>(k_CCOut);
3612     Op->Reg.RegNum = RegNum;
3613     Op->StartLoc = S;
3614     Op->EndLoc = S;
3615     return Op;
3616   }
3617 
3618   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3619     auto Op = std::make_unique<ARMOperand>(k_Token);
3620     Op->Tok.Data = Str.data();
3621     Op->Tok.Length = Str.size();
3622     Op->StartLoc = S;
3623     Op->EndLoc = S;
3624     return Op;
3625   }
3626 
3627   static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3628                                                SMLoc E) {
3629     auto Op = std::make_unique<ARMOperand>(k_Register);
3630     Op->Reg.RegNum = RegNum;
3631     Op->StartLoc = S;
3632     Op->EndLoc = E;
3633     return Op;
3634   }
3635 
3636   static std::unique_ptr<ARMOperand>
3637   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3638                         unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3639                         SMLoc E) {
3640     auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3641     Op->RegShiftedReg.ShiftTy = ShTy;
3642     Op->RegShiftedReg.SrcReg = SrcReg;
3643     Op->RegShiftedReg.ShiftReg = ShiftReg;
3644     Op->RegShiftedReg.ShiftImm = ShiftImm;
3645     Op->StartLoc = S;
3646     Op->EndLoc = E;
3647     return Op;
3648   }
3649 
3650   static std::unique_ptr<ARMOperand>
3651   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3652                          unsigned ShiftImm, SMLoc S, SMLoc E) {
3653     auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3654     Op->RegShiftedImm.ShiftTy = ShTy;
3655     Op->RegShiftedImm.SrcReg = SrcReg;
3656     Op->RegShiftedImm.ShiftImm = ShiftImm;
3657     Op->StartLoc = S;
3658     Op->EndLoc = E;
3659     return Op;
3660   }
3661 
3662   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3663                                                       SMLoc S, SMLoc E) {
3664     auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3665     Op->ShifterImm.isASR = isASR;
3666     Op->ShifterImm.Imm = Imm;
3667     Op->StartLoc = S;
3668     Op->EndLoc = E;
3669     return Op;
3670   }
3671 
3672   static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3673                                                   SMLoc E) {
3674     auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3675     Op->RotImm.Imm = Imm;
3676     Op->StartLoc = S;
3677     Op->EndLoc = E;
3678     return Op;
3679   }
3680 
3681   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3682                                                   SMLoc S, SMLoc E) {
3683     auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3684     Op->ModImm.Bits = Bits;
3685     Op->ModImm.Rot = Rot;
3686     Op->StartLoc = S;
3687     Op->EndLoc = E;
3688     return Op;
3689   }
3690 
3691   static std::unique_ptr<ARMOperand>
3692   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3693     auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3694     Op->Imm.Val = Val;
3695     Op->StartLoc = S;
3696     Op->EndLoc = E;
3697     return Op;
3698   }
3699 
3700   static std::unique_ptr<ARMOperand>
3701   CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3702     auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3703     Op->Bitfield.LSB = LSB;
3704     Op->Bitfield.Width = Width;
3705     Op->StartLoc = S;
3706     Op->EndLoc = E;
3707     return Op;
3708   }
3709 
3710   static std::unique_ptr<ARMOperand>
3711   CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3712                 SMLoc StartLoc, SMLoc EndLoc) {
3713     assert(Regs.size() > 0 && "RegList contains no registers?");
3714     KindTy Kind = k_RegisterList;
3715 
3716     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3717             Regs.front().second)) {
3718       if (Regs.back().second == ARM::VPR)
3719         Kind = k_FPDRegisterListWithVPR;
3720       else
3721         Kind = k_DPRRegisterList;
3722     } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3723                    Regs.front().second)) {
3724       if (Regs.back().second == ARM::VPR)
3725         Kind = k_FPSRegisterListWithVPR;
3726       else
3727         Kind = k_SPRRegisterList;
3728     }
3729 
3730     if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3731       Kind = k_RegisterListWithAPSR;
3732 
3733     assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
3734 
3735     auto Op = std::make_unique<ARMOperand>(Kind);
3736     for (const auto &P : Regs)
3737       Op->Registers.push_back(P.second);
3738 
3739     Op->StartLoc = StartLoc;
3740     Op->EndLoc = EndLoc;
3741     return Op;
3742   }
3743 
3744   static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3745                                                       unsigned Count,
3746                                                       bool isDoubleSpaced,
3747                                                       SMLoc S, SMLoc E) {
3748     auto Op = std::make_unique<ARMOperand>(k_VectorList);
3749     Op->VectorList.RegNum = RegNum;
3750     Op->VectorList.Count = Count;
3751     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3752     Op->StartLoc = S;
3753     Op->EndLoc = E;
3754     return Op;
3755   }
3756 
3757   static std::unique_ptr<ARMOperand>
3758   CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3759                            SMLoc S, SMLoc E) {
3760     auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3761     Op->VectorList.RegNum = RegNum;
3762     Op->VectorList.Count = Count;
3763     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3764     Op->StartLoc = S;
3765     Op->EndLoc = E;
3766     return Op;
3767   }
3768 
3769   static std::unique_ptr<ARMOperand>
3770   CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3771                           bool isDoubleSpaced, SMLoc S, SMLoc E) {
3772     auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3773     Op->VectorList.RegNum = RegNum;
3774     Op->VectorList.Count = Count;
3775     Op->VectorList.LaneIndex = Index;
3776     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3777     Op->StartLoc = S;
3778     Op->EndLoc = E;
3779     return Op;
3780   }
3781 
3782   static std::unique_ptr<ARMOperand>
3783   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3784     auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3785     Op->VectorIndex.Val = Idx;
3786     Op->StartLoc = S;
3787     Op->EndLoc = E;
3788     return Op;
3789   }
3790 
3791   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3792                                                SMLoc E) {
3793     auto Op = std::make_unique<ARMOperand>(k_Immediate);
3794     Op->Imm.Val = Val;
3795     Op->StartLoc = S;
3796     Op->EndLoc = E;
3797     return Op;
3798   }
3799 
3800   static std::unique_ptr<ARMOperand>
3801   CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3802             ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3803             bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3804     auto Op = std::make_unique<ARMOperand>(k_Memory);
3805     Op->Memory.BaseRegNum = BaseRegNum;
3806     Op->Memory.OffsetImm = OffsetImm;
3807     Op->Memory.OffsetRegNum = OffsetRegNum;
3808     Op->Memory.ShiftType = ShiftType;
3809     Op->Memory.ShiftImm = ShiftImm;
3810     Op->Memory.Alignment = Alignment;
3811     Op->Memory.isNegative = isNegative;
3812     Op->StartLoc = S;
3813     Op->EndLoc = E;
3814     Op->AlignmentLoc = AlignmentLoc;
3815     return Op;
3816   }
3817 
3818   static std::unique_ptr<ARMOperand>
3819   CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3820                    unsigned ShiftImm, SMLoc S, SMLoc E) {
3821     auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3822     Op->PostIdxReg.RegNum = RegNum;
3823     Op->PostIdxReg.isAdd = isAdd;
3824     Op->PostIdxReg.ShiftTy = ShiftTy;
3825     Op->PostIdxReg.ShiftImm = ShiftImm;
3826     Op->StartLoc = S;
3827     Op->EndLoc = E;
3828     return Op;
3829   }
3830 
3831   static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3832                                                          SMLoc S) {
3833     auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3834     Op->MBOpt.Val = Opt;
3835     Op->StartLoc = S;
3836     Op->EndLoc = S;
3837     return Op;
3838   }
3839 
3840   static std::unique_ptr<ARMOperand>
3841   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3842     auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3843     Op->ISBOpt.Val = Opt;
3844     Op->StartLoc = S;
3845     Op->EndLoc = S;
3846     return Op;
3847   }
3848 
3849   static std::unique_ptr<ARMOperand>
3850   CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3851     auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3852     Op->TSBOpt.Val = Opt;
3853     Op->StartLoc = S;
3854     Op->EndLoc = S;
3855     return Op;
3856   }
3857 
3858   static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3859                                                       SMLoc S) {
3860     auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3861     Op->IFlags.Val = IFlags;
3862     Op->StartLoc = S;
3863     Op->EndLoc = S;
3864     return Op;
3865   }
3866 
3867   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3868     auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3869     Op->MMask.Val = MMask;
3870     Op->StartLoc = S;
3871     Op->EndLoc = S;
3872     return Op;
3873   }
3874 
3875   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3876     auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3877     Op->BankedReg.Val = Reg;
3878     Op->StartLoc = S;
3879     Op->EndLoc = S;
3880     return Op;
3881   }
3882 };
3883 
3884 } // end anonymous namespace.
3885 
3886 void ARMOperand::print(raw_ostream &OS) const {
3887   auto RegName = [](MCRegister Reg) {
3888     if (Reg)
3889       return ARMInstPrinter::getRegisterName(Reg);
3890     else
3891       return "noreg";
3892   };
3893 
3894   switch (Kind) {
3895   case k_CondCode:
3896     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3897     break;
3898   case k_VPTPred:
3899     OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3900     break;
3901   case k_CCOut:
3902     OS << "<ccout " << RegName(getReg()) << ">";
3903     break;
3904   case k_ITCondMask: {
3905     static const char *const MaskStr[] = {
3906       "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3907       "(tt)",      "(ttet)", "(tte)", "(ttee)",
3908       "(t)",       "(tett)", "(tet)", "(tete)",
3909       "(te)",      "(teet)", "(tee)", "(teee)",
3910     };
3911     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3912     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3913     break;
3914   }
3915   case k_CoprocNum:
3916     OS << "<coprocessor number: " << getCoproc() << ">";
3917     break;
3918   case k_CoprocReg:
3919     OS << "<coprocessor register: " << getCoproc() << ">";
3920     break;
3921   case k_CoprocOption:
3922     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3923     break;
3924   case k_MSRMask:
3925     OS << "<mask: " << getMSRMask() << ">";
3926     break;
3927   case k_BankedReg:
3928     OS << "<banked reg: " << getBankedReg() << ">";
3929     break;
3930   case k_Immediate:
3931     OS << *getImm();
3932     break;
3933   case k_MemBarrierOpt:
3934     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3935     break;
3936   case k_InstSyncBarrierOpt:
3937     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3938     break;
3939   case k_TraceSyncBarrierOpt:
3940     OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3941     break;
3942   case k_Memory:
3943     OS << "<memory";
3944     if (Memory.BaseRegNum)
3945       OS << " base:" << RegName(Memory.BaseRegNum);
3946     if (Memory.OffsetImm)
3947       OS << " offset-imm:" << *Memory.OffsetImm;
3948     if (Memory.OffsetRegNum)
3949       OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3950          << RegName(Memory.OffsetRegNum);
3951     if (Memory.ShiftType != ARM_AM::no_shift) {
3952       OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3953       OS << " shift-imm:" << Memory.ShiftImm;
3954     }
3955     if (Memory.Alignment)
3956       OS << " alignment:" << Memory.Alignment;
3957     OS << ">";
3958     break;
3959   case k_PostIndexRegister:
3960     OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3961        << RegName(PostIdxReg.RegNum);
3962     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3963       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3964          << PostIdxReg.ShiftImm;
3965     OS << ">";
3966     break;
3967   case k_ProcIFlags: {
3968     OS << "<ARM_PROC::";
3969     unsigned IFlags = getProcIFlags();
3970     for (int i=2; i >= 0; --i)
3971       if (IFlags & (1 << i))
3972         OS << ARM_PROC::IFlagsToString(1 << i);
3973     OS << ">";
3974     break;
3975   }
3976   case k_Register:
3977     OS << "<register " << RegName(getReg()) << ">";
3978     break;
3979   case k_ShifterImmediate:
3980     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3981        << " #" << ShifterImm.Imm << ">";
3982     break;
3983   case k_ShiftedRegister:
3984     OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3985        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3986        << RegName(RegShiftedReg.ShiftReg) << ">";
3987     break;
3988   case k_ShiftedImmediate:
3989     OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3990        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3991        << RegShiftedImm.ShiftImm << ">";
3992     break;
3993   case k_RotateImmediate:
3994     OS << "<ror #" << (RotImm.Imm * 8) << ">";
3995     break;
3996   case k_ModifiedImmediate:
3997     OS << "<mod_imm #" << ModImm.Bits << ", #"
3998        << ModImm.Rot << ">";
3999     break;
4000   case k_ConstantPoolImmediate:
4001     OS << "<constant_pool_imm #" << *getConstantPoolImm() << ">";
4002     break;
4003   case k_BitfieldDescriptor:
4004     OS << "<bitfield " << "lsb: " << Bitfield.LSB
4005        << ", width: " << Bitfield.Width << ">";
4006     break;
4007   case k_RegisterList:
4008   case k_RegisterListWithAPSR:
4009   case k_DPRRegisterList:
4010   case k_SPRRegisterList:
4011   case k_FPSRegisterListWithVPR:
4012   case k_FPDRegisterListWithVPR: {
4013     OS << "<register_list ";
4014 
4015     const SmallVectorImpl<unsigned> &RegList = getRegList();
4016     for (SmallVectorImpl<unsigned>::const_iterator
4017            I = RegList.begin(), E = RegList.end(); I != E; ) {
4018       OS << RegName(*I);
4019       if (++I < E) OS << ", ";
4020     }
4021 
4022     OS << ">";
4023     break;
4024   }
4025   case k_VectorList:
4026     OS << "<vector_list " << VectorList.Count << " * "
4027        << RegName(VectorList.RegNum) << ">";
4028     break;
4029   case k_VectorListAllLanes:
4030     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4031        << RegName(VectorList.RegNum) << ">";
4032     break;
4033   case k_VectorListIndexed:
4034     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4035        << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4036     break;
4037   case k_Token:
4038     OS << "'" << getToken() << "'";
4039     break;
4040   case k_VectorIndex:
4041     OS << "<vectorindex " << getVectorIndex() << ">";
4042     break;
4043   }
4044 }
4045 
4046 /// @name Auto-generated Match Functions
4047 /// {
4048 
4049 static unsigned MatchRegisterName(StringRef Name);
4050 
4051 /// }
4052 
4053 bool ARMAsmParser::parseRegister(MCRegister &RegNo, SMLoc &StartLoc,
4054                                  SMLoc &EndLoc) {
4055   const AsmToken &Tok = getParser().getTok();
4056   StartLoc = Tok.getLoc();
4057   EndLoc = Tok.getEndLoc();
4058   RegNo = tryParseRegister();
4059 
4060   return (RegNo == (unsigned)-1);
4061 }
4062 
4063 OperandMatchResultTy ARMAsmParser::tryParseRegister(MCRegister &RegNo,
4064                                                     SMLoc &StartLoc,
4065                                                     SMLoc &EndLoc) {
4066   if (parseRegister(RegNo, StartLoc, EndLoc))
4067     return MatchOperand_NoMatch;
4068   return MatchOperand_Success;
4069 }
4070 
4071 /// Try to parse a register name.  The token must be an Identifier when called,
4072 /// and if it is a register name the token is eaten and the register number is
4073 /// returned.  Otherwise return -1.
4074 int ARMAsmParser::tryParseRegister() {
4075   MCAsmParser &Parser = getParser();
4076   const AsmToken &Tok = Parser.getTok();
4077   if (Tok.isNot(AsmToken::Identifier)) return -1;
4078 
4079   std::string lowerCase = Tok.getString().lower();
4080   unsigned RegNum = MatchRegisterName(lowerCase);
4081   if (!RegNum) {
4082     RegNum = StringSwitch<unsigned>(lowerCase)
4083       .Case("r13", ARM::SP)
4084       .Case("r14", ARM::LR)
4085       .Case("r15", ARM::PC)
4086       .Case("ip", ARM::R12)
4087       // Additional register name aliases for 'gas' compatibility.
4088       .Case("a1", ARM::R0)
4089       .Case("a2", ARM::R1)
4090       .Case("a3", ARM::R2)
4091       .Case("a4", ARM::R3)
4092       .Case("v1", ARM::R4)
4093       .Case("v2", ARM::R5)
4094       .Case("v3", ARM::R6)
4095       .Case("v4", ARM::R7)
4096       .Case("v5", ARM::R8)
4097       .Case("v6", ARM::R9)
4098       .Case("v7", ARM::R10)
4099       .Case("v8", ARM::R11)
4100       .Case("sb", ARM::R9)
4101       .Case("sl", ARM::R10)
4102       .Case("fp", ARM::R11)
4103       .Default(0);
4104   }
4105   if (!RegNum) {
4106     // Check for aliases registered via .req. Canonicalize to lower case.
4107     // That's more consistent since register names are case insensitive, and
4108     // it's how the original entry was passed in from MC/MCParser/AsmParser.
4109     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4110     // If no match, return failure.
4111     if (Entry == RegisterReqs.end())
4112       return -1;
4113     Parser.Lex(); // Eat identifier token.
4114     return Entry->getValue();
4115   }
4116 
4117   // Some FPUs only have 16 D registers, so D16-D31 are invalid
4118   if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
4119     return -1;
4120 
4121   Parser.Lex(); // Eat identifier token.
4122 
4123   return RegNum;
4124 }
4125 
4126 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4127 // If a recoverable error occurs, return 1. If an irrecoverable error
4128 // occurs, return -1. An irrecoverable error is one where tokens have been
4129 // consumed in the process of trying to parse the shifter (i.e., when it is
4130 // indeed a shifter operand, but malformed).
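// For example, when parsing "mov r0, r1, lsl #2", the already-parsed "r1"
// operand is popped off and folded into a single shifted-register operand.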
4131 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4132   MCAsmParser &Parser = getParser();
4133   SMLoc S = Parser.getTok().getLoc();
4134   const AsmToken &Tok = Parser.getTok();
4135   if (Tok.isNot(AsmToken::Identifier))
4136     return -1;
4137 
4138   std::string lowerCase = Tok.getString().lower();
4139   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
4140       .Case("asl", ARM_AM::lsl)
4141       .Case("lsl", ARM_AM::lsl)
4142       .Case("lsr", ARM_AM::lsr)
4143       .Case("asr", ARM_AM::asr)
4144       .Case("ror", ARM_AM::ror)
4145       .Case("rrx", ARM_AM::rrx)
4146       .Default(ARM_AM::no_shift);
4147 
4148   if (ShiftTy == ARM_AM::no_shift)
4149     return 1;
4150 
4151   Parser.Lex(); // Eat the operator.
4152 
4153   // The source register for the shift has already been added to the
4154   // operand list, so we need to pop it off and combine it into the shifted
4155   // register operand instead.
4156   std::unique_ptr<ARMOperand> PrevOp(
4157       (ARMOperand *)Operands.pop_back_val().release());
4158   if (!PrevOp->isReg())
4159     return Error(PrevOp->getStartLoc(), "shift must be of a register");
4160   int SrcReg = PrevOp->getReg();
4161 
4162   SMLoc EndLoc;
4163   int64_t Imm = 0;
4164   int ShiftReg = 0;
4165   if (ShiftTy == ARM_AM::rrx) {
4166     // RRX doesn't have an explicit shift amount. The encoder expects
4167     // the shift register to be the same as the source register. Seems odd,
4168     // but OK.
4169     ShiftReg = SrcReg;
4170   } else {
4171     // Figure out if this is shifted by a constant or a register (for non-RRX).
4172     if (Parser.getTok().is(AsmToken::Hash) ||
4173         Parser.getTok().is(AsmToken::Dollar)) {
4174       Parser.Lex(); // Eat hash.
4175       SMLoc ImmLoc = Parser.getTok().getLoc();
4176       const MCExpr *ShiftExpr = nullptr;
4177       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4178         Error(ImmLoc, "invalid immediate shift value");
4179         return -1;
4180       }
4181       // The expression must be evaluatable as an immediate.
4182       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4183       if (!CE) {
4184         Error(ImmLoc, "invalid immediate shift value");
4185         return -1;
4186       }
4187       // Range check the immediate.
4188       // lsl, ror: 0 <= imm <= 31
4189       // lsr, asr: 0 <= imm <= 32
4190       Imm = CE->getValue();
4191       if (Imm < 0 ||
4192           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4193           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4194         Error(ImmLoc, "immediate shift value out of range");
4195         return -1;
4196       }
4197       // A shift by zero is a nop. Always send it through as lsl.
4198       // ('as' compatibility)
4199       if (Imm == 0)
4200         ShiftTy = ARM_AM::lsl;
4201     } else if (Parser.getTok().is(AsmToken::Identifier)) {
4202       SMLoc L = Parser.getTok().getLoc();
4203       EndLoc = Parser.getTok().getEndLoc();
4204       ShiftReg = tryParseRegister();
4205       if (ShiftReg == -1) {
4206         Error(L, "expected immediate or register in shift operand");
4207         return -1;
4208       }
4209     } else {
4210       Error(Parser.getTok().getLoc(),
4211             "expected immediate or register in shift operand");
4212       return -1;
4213     }
4214   }
4215 
4216   if (ShiftReg && ShiftTy != ARM_AM::rrx)
4217     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4218                                                          ShiftReg, Imm,
4219                                                          S, EndLoc));
4220   else
4221     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4222                                                           S, EndLoc));
4223 
4224   return 0;
4225 }
4226 
4227 /// Try to parse a register name.  The token must be an Identifier when called.
4228 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
4229 /// if there is a "writeback". Returns 'true' if it's not a register.
4230 ///
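/// For example, "r3!" produces a register operand followed by a "!" token
/// operand, and "d0[2]" produces a register operand followed by a vector
/// index operand.
///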
4231 /// TODO this is likely to change to allow different register types and or to
4232 /// parse for a specific register type.
4233 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4234   MCAsmParser &Parser = getParser();
4235   SMLoc RegStartLoc = Parser.getTok().getLoc();
4236   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4237   int RegNo = tryParseRegister();
4238   if (RegNo == -1)
4239     return true;
4240 
4241   Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4242 
4243   const AsmToken &ExclaimTok = Parser.getTok();
4244   if (ExclaimTok.is(AsmToken::Exclaim)) {
4245     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4246                                                ExclaimTok.getLoc()));
4247     Parser.Lex(); // Eat exclaim token
4248     return false;
4249   }
4250 
4251   // Also check for an index operand. This is only legal for vector registers,
4252   // but that'll get caught OK in operand matching, so we don't need to
4253   // explicitly filter everything else out here.
4254   if (Parser.getTok().is(AsmToken::LBrac)) {
4255     SMLoc SIdx = Parser.getTok().getLoc();
4256     Parser.Lex(); // Eat left bracket token.
4257 
4258     const MCExpr *ImmVal;
4259     if (getParser().parseExpression(ImmVal))
4260       return true;
4261     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4262     if (!MCE)
4263       return TokError("immediate value expected for vector index");
4264 
4265     if (Parser.getTok().isNot(AsmToken::RBrac))
4266       return Error(Parser.getTok().getLoc(), "']' expected");
4267 
4268     SMLoc E = Parser.getTok().getEndLoc();
4269     Parser.Lex(); // Eat right bracket token.
4270 
4271     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
4272                                                      SIdx, E,
4273                                                      getContext()));
4274   }
4275 
4276   return false;
4277 }
4278 
4279 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
4280 /// instruction with a symbolic operand name.
4281 /// We accept "crN" syntax for GAS compatibility.
4282 /// <operand-name> ::= <prefix><number>
4283 /// If CoprocOp is 'c', then:
4284 ///   <prefix> ::= c | cr
4285 /// If CoprocOp is 'p', then :
4286 ///   <prefix> ::= p
4287 /// <number> ::= integer in range [0, 15]
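/// For example, with CoprocOp 'p', "p15" yields 15; with CoprocOp 'c', both
/// "c3" and "cr3" yield 3. Anything else returns -1.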
4288 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4289   // Use the same layout as the tablegen'erated register name matcher. Ugly,
4290   // but efficient.
4291   if (Name.size() < 2 || Name[0] != CoprocOp)
4292     return -1;
4293   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4294 
4295   switch (Name.size()) {
4296   default: return -1;
4297   case 1:
4298     switch (Name[0]) {
4299     default:  return -1;
4300     case '0': return 0;
4301     case '1': return 1;
4302     case '2': return 2;
4303     case '3': return 3;
4304     case '4': return 4;
4305     case '5': return 5;
4306     case '6': return 6;
4307     case '7': return 7;
4308     case '8': return 8;
4309     case '9': return 9;
4310     }
4311   case 2:
4312     if (Name[0] != '1')
4313       return -1;
4314     switch (Name[1]) {
4315     default:  return -1;
4316     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4317     // However, old cores (v5/v6) did use them in that way.
4318     case '0': return 10;
4319     case '1': return 11;
4320     case '2': return 12;
4321     case '3': return 13;
4322     case '4': return 14;
4323     case '5': return 15;
4324     }
4325   }
4326 }
4327 
4328 /// parseITCondCode - Try to parse a condition code for an IT instruction.
4329 ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4330   MCAsmParser &Parser = getParser();
4331   SMLoc S = Parser.getTok().getLoc();
4332   const AsmToken &Tok = Parser.getTok();
4333   if (!Tok.is(AsmToken::Identifier))
4334     return ParseStatus::NoMatch;
4335   unsigned CC = ARMCondCodeFromString(Tok.getString());
4336   if (CC == ~0U)
4337     return ParseStatus::NoMatch;
4338   Parser.Lex(); // Eat the token.
4339 
4340   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4341 
4342   return ParseStatus::Success;
4343 }
4344 
/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
4346 /// token must be an Identifier when called, and if it is a coprocessor
4347 /// number, the token is eaten and the operand is added to the operand list.
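/// e.g. the "p15" in "mrc p15, 0, r0, c1, c0, 0".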
4348 ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4349   MCAsmParser &Parser = getParser();
4350   SMLoc S = Parser.getTok().getLoc();
4351   const AsmToken &Tok = Parser.getTok();
4352   if (Tok.isNot(AsmToken::Identifier))
4353     return ParseStatus::NoMatch;
4354 
4355   int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4356   if (Num == -1)
4357     return ParseStatus::NoMatch;
4358   if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4359     return ParseStatus::NoMatch;
4360 
4361   Parser.Lex(); // Eat identifier token.
4362   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4363   return ParseStatus::Success;
4364 }
4365 
/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// register, the token is eaten and the operand is added to the operand list.
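/// e.g. the "c1" and "c0" operands in "mrc p15, 0, r0, c1, c0, 0".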
4369 ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4370   MCAsmParser &Parser = getParser();
4371   SMLoc S = Parser.getTok().getLoc();
4372   const AsmToken &Tok = Parser.getTok();
4373   if (Tok.isNot(AsmToken::Identifier))
4374     return ParseStatus::NoMatch;
4375 
4376   int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4377   if (Reg == -1)
4378     return ParseStatus::NoMatch;
4379 
4380   Parser.Lex(); // Eat identifier token.
4381   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4382   return ParseStatus::Success;
4383 }
4384 
/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
4386 /// coproc_option : '{' imm0_255 '}'
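/// e.g. the "{4}" in "ldc p14, c5, [r1], {4}".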
4387 ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4388   MCAsmParser &Parser = getParser();
4389   SMLoc S = Parser.getTok().getLoc();
4390 
4391   // If this isn't a '{', this isn't a coprocessor immediate operand.
4392   if (Parser.getTok().isNot(AsmToken::LCurly))
4393     return ParseStatus::NoMatch;
4394   Parser.Lex(); // Eat the '{'
4395 
4396   const MCExpr *Expr;
4397   SMLoc Loc = Parser.getTok().getLoc();
4398   if (getParser().parseExpression(Expr))
4399     return Error(Loc, "illegal expression");
4400   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4401   if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4402     return Error(Loc,
4403                  "coprocessor option must be an immediate in range [0, 255]");
4404   int Val = CE->getValue();
4405 
4406   // Check for and consume the closing '}'
4407   if (Parser.getTok().isNot(AsmToken::RCurly))
4408     return ParseStatus::Failure;
4409   SMLoc E = Parser.getTok().getEndLoc();
4410   Parser.Lex(); // Eat the '}'
4411 
4412   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4413   return ParseStatus::Success;
4414 }
4415 
4416 // For register list parsing, we need to map from raw GPR register numbering
4417 // to the enumeration values. The enumeration values aren't sorted by
4418 // register number due to our using "sp", "lr" and "pc" as canonical names.
4419 static unsigned getNextRegister(unsigned Reg) {
4420   // If this is a GPR, we need to do it manually, otherwise we can rely
4421   // on the sort ordering of the enumeration since the other reg-classes
4422   // are sane.
4423   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4424     return Reg + 1;
4425   switch(Reg) {
4426   default: llvm_unreachable("Invalid GPR number!");
4427   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
4428   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
4429   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
4430   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
4431   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
4432   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4433   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
4434   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
4435   }
4436 }
4437 
4438 // Insert an <Encoding, Register> pair in an ordered vector. Return true on
// success, or false if a duplicate encoding is found.
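// The vector is kept sorted by encoding: the new entry is appended and then
// bubbled backwards into position (insertion sort); if an entry with the same
// encoding is already present, the insertion is undone.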
4440 static bool
4441 insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4442                    unsigned Enc, unsigned Reg) {
4443   Regs.emplace_back(Enc, Reg);
4444   for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4445     if (J->first == Enc) {
4446       Regs.erase(J.base());
4447       return false;
4448     }
4449     if (J->first < Enc)
4450       break;
4451     std::swap(*I, *J);
4452   }
4453   return true;
4454 }
4455 
4456 /// Parse a register list.
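/// e.g. "{r0, r4-r7, lr}" for LDM/STM/PUSH/POP, "{d8-d11}" for VPUSH/VPOP,
/// or "{r1, r2, apsr}" for CLRM.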
4457 bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4458                                      bool AllowRAAC) {
4459   MCAsmParser &Parser = getParser();
4460   if (Parser.getTok().isNot(AsmToken::LCurly))
4461     return TokError("Token is not a Left Curly Brace");
4462   SMLoc S = Parser.getTok().getLoc();
4463   Parser.Lex(); // Eat '{' token.
4464   SMLoc RegLoc = Parser.getTok().getLoc();
4465 
4466   // Check the first register in the list to see what register class
4467   // this is a list of.
4468   int Reg = tryParseRegister();
4469   if (Reg == -1)
4470     return Error(RegLoc, "register expected");
4471   if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4472     return Error(RegLoc, "pseudo-register not allowed");
4473   // The reglist instructions have at most 16 registers, so reserve
4474   // space for that many.
4475   int EReg = 0;
4476   SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
4477 
4478   // Allow Q regs and just interpret them as the two D sub-registers.
4479   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4480     Reg = getDRegFromQReg(Reg);
4481     EReg = MRI->getEncodingValue(Reg);
4482     Registers.emplace_back(EReg, Reg);
4483     ++Reg;
4484   }
4485   const MCRegisterClass *RC;
4486   if (Reg == ARM::RA_AUTH_CODE ||
4487       ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4488     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4489   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4490     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4491   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4492     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4493   else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4494     RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4495   else
4496     return Error(RegLoc, "invalid register in register list");
4497 
4498   // Store the register.
4499   EReg = MRI->getEncodingValue(Reg);
4500   Registers.emplace_back(EReg, Reg);
4501 
4502   // This starts immediately after the first register token in the list,
4503   // so we can see either a comma or a minus (range separator) as a legal
4504   // next token.
4505   while (Parser.getTok().is(AsmToken::Comma) ||
4506          Parser.getTok().is(AsmToken::Minus)) {
4507     if (Parser.getTok().is(AsmToken::Minus)) {
4508       if (Reg == ARM::RA_AUTH_CODE)
4509         return Error(RegLoc, "pseudo-register not allowed");
4510       Parser.Lex(); // Eat the minus.
4511       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4512       int EndReg = tryParseRegister();
4513       if (EndReg == -1)
4514         return Error(AfterMinusLoc, "register expected");
4515       if (EndReg == ARM::RA_AUTH_CODE)
4516         return Error(AfterMinusLoc, "pseudo-register not allowed");
4517       // Allow Q regs and just interpret them as the two D sub-registers.
4518       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4519         EndReg = getDRegFromQReg(EndReg) + 1;
4520       // If the register is the same as the start reg, there's nothing
4521       // more to do.
4522       if (Reg == EndReg)
4523         continue;
4524       // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
4526         return Error(AfterMinusLoc, "invalid register in register list");
4527       // Ranges must go from low to high.
4528       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4529         return Error(AfterMinusLoc, "bad range in register list");
4530 
4531       // Add all the registers in the range to the register list.
4532       while (Reg != EndReg) {
4533         Reg = getNextRegister(Reg);
4534         EReg = MRI->getEncodingValue(Reg);
4535         if (!insertNoDuplicates(Registers, EReg, Reg)) {
4536           Warning(AfterMinusLoc, StringRef("duplicated register (") +
4537                                      ARMInstPrinter::getRegisterName(Reg) +
4538                                      ") in register list");
4539         }
4540       }
4541       continue;
4542     }
4543     Parser.Lex(); // Eat the comma.
4544     RegLoc = Parser.getTok().getLoc();
4545     int OldReg = Reg;
4546     const AsmToken RegTok = Parser.getTok();
4547     Reg = tryParseRegister();
4548     if (Reg == -1)
4549       return Error(RegLoc, "register expected");
4550     if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4551       return Error(RegLoc, "pseudo-register not allowed");
4552     // Allow Q regs and just interpret them as the two D sub-registers.
4553     bool isQReg = false;
4554     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4555       Reg = getDRegFromQReg(Reg);
4556       isQReg = true;
4557     }
4558     if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4559         RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4560         ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
      // Switch the register class: GPRwithAPSRnospRegClassID overlaps
      // GPRRegClassID (it excludes SP) but additionally contains APSR.
4563       RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4564     }
4565     if (Reg == ARM::VPR &&
4566         (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4567          RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4568          RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4569       RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4570       EReg = MRI->getEncodingValue(Reg);
4571       if (!insertNoDuplicates(Registers, EReg, Reg)) {
4572         Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4573                             ") in register list");
4574       }
4575       continue;
4576     }
4577     // The register must be in the same register class as the first.
4578     if ((Reg == ARM::RA_AUTH_CODE &&
4579          RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4580         (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4581       return Error(RegLoc, "invalid register in register list");
4582     // In most cases, the list must be monotonically increasing. An
4583     // exception is CLRM, which is order-independent anyway, so
4584     // there's no potential for confusion if you write clrm {r2,r1}
4585     // instead of clrm {r1,r2}.
4586     if (EnforceOrder &&
4587         MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4588       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4589         Warning(RegLoc, "register list not in ascending order");
4590       else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4591         return Error(RegLoc, "register list not in ascending order");
4592     }
4593     // VFP register lists must also be contiguous.
4594     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4595         RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4596         Reg != OldReg + 1)
4597       return Error(RegLoc, "non-contiguous register range");
4598     EReg = MRI->getEncodingValue(Reg);
4599     if (!insertNoDuplicates(Registers, EReg, Reg)) {
4600       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4601                           ") in register list");
4602     }
4603     if (isQReg) {
4604       EReg = MRI->getEncodingValue(++Reg);
4605       Registers.emplace_back(EReg, Reg);
4606     }
4607   }
4608 
4609   if (Parser.getTok().isNot(AsmToken::RCurly))
4610     return Error(Parser.getTok().getLoc(), "'}' expected");
4611   SMLoc E = Parser.getTok().getEndLoc();
4612   Parser.Lex(); // Eat '}' token.
4613 
4614   // Push the register list operand.
4615   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4616 
4617   // The ARM system instruction variants for LDM/STM have a '^' token here.
4618   if (Parser.getTok().is(AsmToken::Caret)) {
4619     Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4620     Parser.Lex(); // Eat '^' token.
4621   }
4622 
4623   return false;
4624 }
4625 
4626 // Helper function to parse the lane index for vector lists.
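// e.g. the "[2]" in "vld1.32 {d0[2]}, [r0]" or the all-lanes "[]" in
// "vld1.8 {d0[]}, [r1]".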
4627 ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4628                                           unsigned &Index, SMLoc &EndLoc) {
4629   MCAsmParser &Parser = getParser();
4630   Index = 0; // Always return a defined index value.
4631   if (Parser.getTok().is(AsmToken::LBrac)) {
4632     Parser.Lex(); // Eat the '['.
4633     if (Parser.getTok().is(AsmToken::RBrac)) {
4634       // "Dn[]" is the 'all lanes' syntax.
4635       LaneKind = AllLanes;
4636       EndLoc = Parser.getTok().getEndLoc();
4637       Parser.Lex(); // Eat the ']'.
4638       return ParseStatus::Success;
4639     }
4640 
    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assembly puts one in, and it's friendly to accept that.
4643     if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat the '#'.
4645 
4646     const MCExpr *LaneIndex;
4647     SMLoc Loc = Parser.getTok().getLoc();
4648     if (getParser().parseExpression(LaneIndex))
4649       return Error(Loc, "illegal expression");
4650     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4651     if (!CE)
4652       return Error(Loc, "lane index must be empty or an integer");
4653     if (Parser.getTok().isNot(AsmToken::RBrac))
4654       return Error(Parser.getTok().getLoc(), "']' expected");
4655     EndLoc = Parser.getTok().getEndLoc();
4656     Parser.Lex(); // Eat the ']'.
4657     int64_t Val = CE->getValue();
4658 
4659     // FIXME: Make this range check context sensitive for .8, .16, .32.
4660     if (Val < 0 || Val > 7)
4661       return Error(Parser.getTok().getLoc(), "lane index out of range");
4662     Index = Val;
4663     LaneKind = IndexedLane;
4664     return ParseStatus::Success;
4665   }
4666   LaneKind = NoLanes;
4667   return ParseStatus::Success;
4668 }
4669 
// Parse a vector register list.
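// e.g. "{d0, d1}", "{d0-d3}", "{d0[], d1[]}", "{q0, q1}", or, as a GAS
// extension, a bare "d0" or "q1" without braces.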
4671 ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
4672   MCAsmParser &Parser = getParser();
4673   VectorLaneTy LaneKind;
4674   unsigned LaneIndex;
4675   SMLoc S = Parser.getTok().getLoc();
4676   // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single- or double-entry list,
4678   // respectively.
4679   if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4680     SMLoc E = Parser.getTok().getEndLoc();
4681     int Reg = tryParseRegister();
4682     if (Reg == -1)
4683       return ParseStatus::NoMatch;
4684     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4685       ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4686       if (!Res.isSuccess())
4687         return Res;
4688       switch (LaneKind) {
4689       case NoLanes:
4690         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4691         break;
4692       case AllLanes:
4693         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4694                                                                 S, E));
4695         break;
4696       case IndexedLane:
4697         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4698                                                                LaneIndex,
4699                                                                false, S, E));
4700         break;
4701       }
4702       return ParseStatus::Success;
4703     }
4704     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4705       Reg = getDRegFromQReg(Reg);
4706       ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4707       if (!Res.isSuccess())
4708         return Res;
4709       switch (LaneKind) {
4710       case NoLanes:
4711         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4712                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4713         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4714         break;
4715       case AllLanes:
4716         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4717                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4718         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4719                                                                 S, E));
4720         break;
4721       case IndexedLane:
4722         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4723                                                                LaneIndex,
4724                                                                false, S, E));
4725         break;
4726       }
4727       return ParseStatus::Success;
4728     }
4729     return Error(S, "vector register expected");
4730   }
4731 
4732   if (Parser.getTok().isNot(AsmToken::LCurly))
4733     return ParseStatus::NoMatch;
4734 
4735   Parser.Lex(); // Eat '{' token.
4736   SMLoc RegLoc = Parser.getTok().getLoc();
4737 
4738   int Reg = tryParseRegister();
4739   if (Reg == -1)
4740     return Error(RegLoc, "register expected");
4741   unsigned Count = 1;
4742   int Spacing = 0;
4743   unsigned FirstReg = Reg;
4744 
4745   if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4746     return Error(Parser.getTok().getLoc(),
4747                  "vector register in range Q0-Q7 expected");
4748   // The list is of D registers, but we also allow Q regs and just interpret
4749   // them as the two D sub-registers.
4750   else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4751     FirstReg = Reg = getDRegFromQReg(Reg);
4752     Spacing = 1; // double-spacing requires explicit D registers, otherwise
4753                  // it's ambiguous with four-register single spaced.
4754     ++Reg;
4755     ++Count;
4756   }
4757 
4758   SMLoc E;
4759   if (!parseVectorLane(LaneKind, LaneIndex, E).isSuccess())
4760     return ParseStatus::Failure;
4761 
4762   while (Parser.getTok().is(AsmToken::Comma) ||
4763          Parser.getTok().is(AsmToken::Minus)) {
4764     if (Parser.getTok().is(AsmToken::Minus)) {
4765       if (!Spacing)
4766         Spacing = 1; // Register range implies a single spaced list.
4767       else if (Spacing == 2)
4768         return Error(Parser.getTok().getLoc(),
4769                      "sequential registers in double spaced list");
4770       Parser.Lex(); // Eat the minus.
4771       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4772       int EndReg = tryParseRegister();
4773       if (EndReg == -1)
4774         return Error(AfterMinusLoc, "register expected");
4775       // Allow Q regs and just interpret them as the two D sub-registers.
4776       if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4777         EndReg = getDRegFromQReg(EndReg) + 1;
4778       // If the register is the same as the start reg, there's nothing
4779       // more to do.
4780       if (Reg == EndReg)
4781         continue;
4782       // The register must be in the same register class as the first.
4783       if ((hasMVE() &&
4784            !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4785           (!hasMVE() &&
4786            !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)))
4787         return Error(AfterMinusLoc, "invalid register in register list");
4788       // Ranges must go from low to high.
4789       if (Reg > EndReg)
4790         return Error(AfterMinusLoc, "bad range in register list");
4791       // Parse the lane specifier if present.
4792       VectorLaneTy NextLaneKind;
4793       unsigned NextLaneIndex;
4794       if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4795         return ParseStatus::Failure;
4796       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4797         return Error(AfterMinusLoc, "mismatched lane index in register list");
4798 
4799       // Add all the registers in the range to the register list.
4800       Count += EndReg - Reg;
4801       Reg = EndReg;
4802       continue;
4803     }
4804     Parser.Lex(); // Eat the comma.
4805     RegLoc = Parser.getTok().getLoc();
4806     int OldReg = Reg;
4807     Reg = tryParseRegister();
4808     if (Reg == -1)
4809       return Error(RegLoc, "register expected");
4810 
4811     if (hasMVE()) {
4812       if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4813         return Error(RegLoc, "vector register in range Q0-Q7 expected");
4814       Spacing = 1;
4815     }
    // Vector register lists must be contiguous.
    // It's OK to compare the enumeration values directly here rather than the
    // encodings, as the VFP register classes have their enums sorted properly.
4819     //
4820     // The list is of D registers, but we also allow Q regs and just interpret
4821     // them as the two D sub-registers.
4822     else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4823       if (!Spacing)
4824         Spacing = 1; // Register range implies a single spaced list.
4825       else if (Spacing == 2)
4826         return Error(
4827             RegLoc,
            "invalid register in double-spaced list (must be 'D' register)");
4829       Reg = getDRegFromQReg(Reg);
4830       if (Reg != OldReg + 1)
4831         return Error(RegLoc, "non-contiguous register range");
4832       ++Reg;
4833       Count += 2;
4834       // Parse the lane specifier if present.
4835       VectorLaneTy NextLaneKind;
4836       unsigned NextLaneIndex;
4837       SMLoc LaneLoc = Parser.getTok().getLoc();
4838       if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4839         return ParseStatus::Failure;
4840       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4841         return Error(LaneLoc, "mismatched lane index in register list");
4842       continue;
4843     }
4844     // Normal D register.
4845     // Figure out the register spacing (single or double) of the list if
4846     // we don't know it already.
4847     if (!Spacing)
4848       Spacing = 1 + (Reg == OldReg + 2);
4849 
4850     // Just check that it's contiguous and keep going.
4851     if (Reg != OldReg + Spacing)
4852       return Error(RegLoc, "non-contiguous register range");
4853     ++Count;
4854     // Parse the lane specifier if present.
4855     VectorLaneTy NextLaneKind;
4856     unsigned NextLaneIndex;
4857     SMLoc EndLoc = Parser.getTok().getLoc();
4858     if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4859       return ParseStatus::Failure;
4860     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4861       return Error(EndLoc, "mismatched lane index in register list");
4862   }
4863 
4864   if (Parser.getTok().isNot(AsmToken::RCurly))
4865     return Error(Parser.getTok().getLoc(), "'}' expected");
4866   E = Parser.getTok().getEndLoc();
4867   Parser.Lex(); // Eat '}' token.
4868 
4869   switch (LaneKind) {
4870   case NoLanes:
4871   case AllLanes: {
4872     // Two-register operands have been converted to the
4873     // composite register classes.
4874     if (Count == 2 && !hasMVE()) {
4875       const MCRegisterClass *RC = (Spacing == 1) ?
4876         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4877         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4878       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4879     }
4880     auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4881                    ARMOperand::CreateVectorListAllLanes);
4882     Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
4883     break;
4884   }
4885   case IndexedLane:
4886     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4887                                                            LaneIndex,
4888                                                            (Spacing == 2),
4889                                                            S, E));
4890     break;
4891   }
4892   return ParseStatus::Success;
4893 }
4894 
4895 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
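/// e.g. the "ish" in "dmb ish", or a raw 4-bit immediate such as "dmb #11".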
4896 ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4897   MCAsmParser &Parser = getParser();
4898   SMLoc S = Parser.getTok().getLoc();
4899   const AsmToken &Tok = Parser.getTok();
4900   unsigned Opt;
4901 
4902   if (Tok.is(AsmToken::Identifier)) {
4903     StringRef OptStr = Tok.getString();
4904 
    Opt = StringSwitch<unsigned>(OptStr.lower())
4906       .Case("sy",    ARM_MB::SY)
4907       .Case("st",    ARM_MB::ST)
4908       .Case("ld",    ARM_MB::LD)
4909       .Case("sh",    ARM_MB::ISH)
4910       .Case("ish",   ARM_MB::ISH)
4911       .Case("shst",  ARM_MB::ISHST)
4912       .Case("ishst", ARM_MB::ISHST)
4913       .Case("ishld", ARM_MB::ISHLD)
4914       .Case("nsh",   ARM_MB::NSH)
4915       .Case("un",    ARM_MB::NSH)
4916       .Case("nshst", ARM_MB::NSHST)
4917       .Case("nshld", ARM_MB::NSHLD)
4918       .Case("unst",  ARM_MB::NSHST)
4919       .Case("osh",   ARM_MB::OSH)
4920       .Case("oshst", ARM_MB::OSHST)
4921       .Case("oshld", ARM_MB::OSHLD)
4922       .Default(~0U);
4923 
4924     // ishld, oshld, nshld and ld are only available from ARMv8.
4925     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4926                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4927       Opt = ~0U;
4928 
4929     if (Opt == ~0U)
4930       return ParseStatus::NoMatch;
4931 
4932     Parser.Lex(); // Eat identifier token.
4933   } else if (Tok.is(AsmToken::Hash) ||
4934              Tok.is(AsmToken::Dollar) ||
4935              Tok.is(AsmToken::Integer)) {
4936     if (Parser.getTok().isNot(AsmToken::Integer))
4937       Parser.Lex(); // Eat '#' or '$'.
4938     SMLoc Loc = Parser.getTok().getLoc();
4939 
4940     const MCExpr *MemBarrierID;
4941     if (getParser().parseExpression(MemBarrierID))
4942       return Error(Loc, "illegal expression");
4943 
4944     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4945     if (!CE)
4946       return Error(Loc, "constant expression expected");
4947 
4948     int Val = CE->getValue();
4949     if (Val & ~0xf)
4950       return Error(Loc, "immediate value out of range");
4951 
4952     Opt = ARM_MB::RESERVED_0 + Val;
4953   } else
4954     return ParseStatus::Failure;
4955 
4956   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4957   return ParseStatus::Success;
4958 }
4959 
4960 ParseStatus
4961 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4962   MCAsmParser &Parser = getParser();
4963   SMLoc S = Parser.getTok().getLoc();
4964   const AsmToken &Tok = Parser.getTok();
4965 
4966   if (Tok.isNot(AsmToken::Identifier))
4967     return ParseStatus::NoMatch;
4968 
4969   if (!Tok.getString().equals_insensitive("csync"))
4970     return ParseStatus::NoMatch;
4971 
4972   Parser.Lex(); // Eat identifier token.
4973 
4974   Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4975   return ParseStatus::Success;
4976 }
4977 
4978 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
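/// e.g. "isb sy" or a raw 4-bit immediate such as "isb #15".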
4979 ParseStatus
4980 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4981   MCAsmParser &Parser = getParser();
4982   SMLoc S = Parser.getTok().getLoc();
4983   const AsmToken &Tok = Parser.getTok();
4984   unsigned Opt;
4985 
4986   if (Tok.is(AsmToken::Identifier)) {
4987     StringRef OptStr = Tok.getString();
4988 
4989     if (OptStr.equals_insensitive("sy"))
4990       Opt = ARM_ISB::SY;
4991     else
4992       return ParseStatus::NoMatch;
4993 
4994     Parser.Lex(); // Eat identifier token.
4995   } else if (Tok.is(AsmToken::Hash) ||
4996              Tok.is(AsmToken::Dollar) ||
4997              Tok.is(AsmToken::Integer)) {
4998     if (Parser.getTok().isNot(AsmToken::Integer))
4999       Parser.Lex(); // Eat '#' or '$'.
5000     SMLoc Loc = Parser.getTok().getLoc();
5001 
5002     const MCExpr *ISBarrierID;
5003     if (getParser().parseExpression(ISBarrierID))
5004       return Error(Loc, "illegal expression");
5005 
5006     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5007     if (!CE)
5008       return Error(Loc, "constant expression expected");
5009 
5010     int Val = CE->getValue();
5011     if (Val & ~0xf)
5012       return Error(Loc, "immediate value out of range");
5013 
5014     Opt = ARM_ISB::RESERVED_0 + Val;
5015   } else
5016     return ParseStatus::Failure;
5017 
5018   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5019           (ARM_ISB::InstSyncBOpt)Opt, S));
5020   return ParseStatus::Success;
5021 }
5022 
5023 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
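/// e.g. the "if" in "cpsid if" or the "aif" in "cpsie aif".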
5024 ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5025   MCAsmParser &Parser = getParser();
5026   SMLoc S = Parser.getTok().getLoc();
5027   const AsmToken &Tok = Parser.getTok();
5028   if (!Tok.is(AsmToken::Identifier))
5029     return ParseStatus::NoMatch;
5030   StringRef IFlagsStr = Tok.getString();
5031 
5032   // An iflags string of "none" is interpreted to mean that none of the AIF
5033   // bits are set.  Not a terribly useful instruction, but a valid encoding.
5034   unsigned IFlags = 0;
5035   if (IFlagsStr != "none") {
    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5037       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5038         .Case("a", ARM_PROC::A)
5039         .Case("i", ARM_PROC::I)
5040         .Case("f", ARM_PROC::F)
5041         .Default(~0U);
5042 
      // If some specific iflag is already set, it means that some letter is
      // present more than once, which is not acceptable.
5045       if (Flag == ~0U || (IFlags & Flag))
5046         return ParseStatus::NoMatch;
5047 
5048       IFlags |= Flag;
5049     }
5050   }
5051 
5052   Parser.Lex(); // Eat identifier token.
5053   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
5054   return ParseStatus::Success;
5055 }
5056 
5057 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
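/// e.g. the "cpsr_fc" in "msr cpsr_fc, r0", the "apsr_nzcvq" in
/// "msr apsr_nzcvq, r1", or an M-class system register name such as the
/// "primask" in "msr primask, r0".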
5058 ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
5059   MCAsmParser &Parser = getParser();
5060   SMLoc S = Parser.getTok().getLoc();
5061   const AsmToken &Tok = Parser.getTok();
5062 
5063   if (Tok.is(AsmToken::Integer)) {
5064     int64_t Val = Tok.getIntVal();
5065     if (Val > 255 || Val < 0) {
5066       return ParseStatus::NoMatch;
5067     }
5068     unsigned SYSmvalue = Val & 0xFF;
5069     Parser.Lex();
5070     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5071     return ParseStatus::Success;
5072   }
5073 
5074   if (!Tok.is(AsmToken::Identifier))
5075     return ParseStatus::NoMatch;
5076   StringRef Mask = Tok.getString();
5077 
5078   if (isMClass()) {
5079     auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
5080     if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5081       return ParseStatus::NoMatch;
5082 
5083     unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5084 
5085     Parser.Lex(); // Eat identifier token.
5086     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5087     return ParseStatus::Success;
5088   }
5089 
5090   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
5091   size_t Start = 0, Next = Mask.find('_');
5092   StringRef Flags = "";
5093   std::string SpecReg = Mask.slice(Start, Next).lower();
5094   if (Next != StringRef::npos)
5095     Flags = Mask.slice(Next+1, Mask.size());
5096 
5097   // FlagsVal contains the complete mask:
5098   // 3-0: Mask
5099   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5100   unsigned FlagsVal = 0;
5101 
5102   if (SpecReg == "apsr") {
5103     FlagsVal = StringSwitch<unsigned>(Flags)
5104     .Case("nzcvq",  0x8) // same as CPSR_f
5105     .Case("g",      0x4) // same as CPSR_s
5106     .Case("nzcvqg", 0xc) // same as CPSR_fs
5107     .Default(~0U);
5108 
5109     if (FlagsVal == ~0U) {
5110       if (!Flags.empty())
5111         return ParseStatus::NoMatch;
5112       else
5113         FlagsVal = 8; // No flag
5114     }
5115   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
5116     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
5117     if (Flags == "all" || Flags == "")
5118       Flags = "fc";
5119     for (int i = 0, e = Flags.size(); i != e; ++i) {
5120       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
5121       .Case("c", 1)
5122       .Case("x", 2)
5123       .Case("s", 4)
5124       .Case("f", 8)
5125       .Default(~0U);
5126 
      // If some specific flag is already set, it means that some letter is
      // present more than once, which is not acceptable.
5129       if (Flag == ~0U || (FlagsVal & Flag))
5130         return ParseStatus::NoMatch;
5131       FlagsVal |= Flag;
5132     }
5133   } else // No match for special register.
5134     return ParseStatus::NoMatch;
5135 
5136   // Special register without flags is NOT equivalent to "fc" flags.
5137   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
5138   // two lines would enable gas compatibility at the expense of breaking
5139   // round-tripping.
5140   //
5141   // if (!FlagsVal)
5142   //  FlagsVal = 0x9;
5143 
5144   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5145   if (SpecReg == "spsr")
5146     FlagsVal |= 16;
5147 
5148   Parser.Lex(); // Eat identifier token.
5149   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
5150   return ParseStatus::Success;
5151 }
5152 
5153 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5154 /// use in the MRS/MSR instructions added to support virtualization.
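/// e.g. the "lr_irq" in "mrs r0, lr_irq" or the "sp_usr" in "msr sp_usr, r1".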
5155 ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5156   MCAsmParser &Parser = getParser();
5157   SMLoc S = Parser.getTok().getLoc();
5158   const AsmToken &Tok = Parser.getTok();
5159   if (!Tok.is(AsmToken::Identifier))
5160     return ParseStatus::NoMatch;
5161   StringRef RegName = Tok.getString();
5162 
5163   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5164   if (!TheReg)
5165     return ParseStatus::NoMatch;
5166   unsigned Encoding = TheReg->Encoding;
5167 
5168   Parser.Lex(); // Eat identifier token.
5169   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5170   return ParseStatus::Success;
5171 }
5172 
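/// parsePKHImm - Parse the shift operand of the PKHBT/PKHTB instructions,
/// e.g. the "lsl #8" in "pkhbt r0, r1, r2, lsl #8" or the "asr #16" in
/// "pkhtb r0, r1, r2, asr #16".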
5173 ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op,
5174                                       int Low, int High) {
5175   MCAsmParser &Parser = getParser();
5176   const AsmToken &Tok = Parser.getTok();
5177   if (Tok.isNot(AsmToken::Identifier))
5178     return Error(Parser.getTok().getLoc(), Op + " operand expected.");
5179   StringRef ShiftName = Tok.getString();
5180   std::string LowerOp = Op.lower();
5181   std::string UpperOp = Op.upper();
5182   if (ShiftName != LowerOp && ShiftName != UpperOp)
5183     return Error(Parser.getTok().getLoc(), Op + " operand expected.");
5184   Parser.Lex(); // Eat shift type token.
5185 
5186   // There must be a '#' and a shift amount.
5187   if (Parser.getTok().isNot(AsmToken::Hash) &&
5188       Parser.getTok().isNot(AsmToken::Dollar))
5189     return Error(Parser.getTok().getLoc(), "'#' expected");
5190   Parser.Lex(); // Eat hash token.
5191 
5192   const MCExpr *ShiftAmount;
5193   SMLoc Loc = Parser.getTok().getLoc();
5194   SMLoc EndLoc;
5195   if (getParser().parseExpression(ShiftAmount, EndLoc))
5196     return Error(Loc, "illegal expression");
5197   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5198   if (!CE)
5199     return Error(Loc, "constant expression expected");
5200   int Val = CE->getValue();
5201   if (Val < Low || Val > High)
5202     return Error(Loc, "immediate value out of range");
5203 
5204   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5205 
5206   return ParseStatus::Success;
5207 }
5208 
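/// parseSetEndImm - Parse the endianness operand of the SETEND instruction,
/// e.g. the "be" in "setend be" or the "le" in "setend le".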
5209 ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5210   MCAsmParser &Parser = getParser();
5211   const AsmToken &Tok = Parser.getTok();
5212   SMLoc S = Tok.getLoc();
5213   if (Tok.isNot(AsmToken::Identifier))
5214     return Error(S, "'be' or 'le' operand expected");
5215   int Val = StringSwitch<int>(Tok.getString().lower())
5216     .Case("be", 1)
5217     .Case("le", 0)
5218     .Default(-1);
5219   Parser.Lex(); // Eat the token.
5220 
5221   if (Val == -1)
5222     return Error(S, "'be' or 'le' operand expected");
5223   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
5224                                                                   getContext()),
5225                                            S, Tok.getEndLoc()));
5226   return ParseStatus::Success;
5227 }
5228 
5229 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5230 /// instructions. Legal values are:
5231 ///     lsl #n  'n' in [0,31]
5232 ///     asr #n  'n' in [1,32]
5233 ///             n == 32 encoded as n == 0.
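/// e.g. the "lsl #4" in "ssat r0, #8, r1, lsl #4".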
5234 ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5235   MCAsmParser &Parser = getParser();
5236   const AsmToken &Tok = Parser.getTok();
5237   SMLoc S = Tok.getLoc();
5238   if (Tok.isNot(AsmToken::Identifier))
5239     return Error(S, "shift operator 'asr' or 'lsl' expected");
5240   StringRef ShiftName = Tok.getString();
5241   bool isASR;
5242   if (ShiftName == "lsl" || ShiftName == "LSL")
5243     isASR = false;
5244   else if (ShiftName == "asr" || ShiftName == "ASR")
5245     isASR = true;
5246   else
5247     return Error(S, "shift operator 'asr' or 'lsl' expected");
5248   Parser.Lex(); // Eat the operator.
5249 
5250   // A '#' and a shift amount.
5251   if (Parser.getTok().isNot(AsmToken::Hash) &&
5252       Parser.getTok().isNot(AsmToken::Dollar))
5253     return Error(Parser.getTok().getLoc(), "'#' expected");
5254   Parser.Lex(); // Eat hash token.
5255   SMLoc ExLoc = Parser.getTok().getLoc();
5256 
5257   const MCExpr *ShiftAmount;
5258   SMLoc EndLoc;
5259   if (getParser().parseExpression(ShiftAmount, EndLoc))
5260     return Error(ExLoc, "malformed shift expression");
5261   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5262   if (!CE)
5263     return Error(ExLoc, "shift amount must be an immediate");
5264 
5265   int64_t Val = CE->getValue();
5266   if (isASR) {
5267     // Shift amount must be in [1,32]
5268     if (Val < 1 || Val > 32)
5269       return Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5270     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5271     if (isThumb() && Val == 32)
5272       return Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5273     if (Val == 32) Val = 0;
5274   } else {
    // Shift amount must be in [0,31]
    if (Val < 0 || Val > 31)
      return Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
5278   }
5279 
5280   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5281 
5282   return ParseStatus::Success;
5283 }
5284 
5285 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5286 /// of instructions. Legal values are:
5287 ///     ror #n  'n' in {0, 8, 16, 24}
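/// e.g. the "ror #8" in "uxtb r0, r1, ror #8".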
5288 ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
5289   MCAsmParser &Parser = getParser();
5290   const AsmToken &Tok = Parser.getTok();
5291   SMLoc S = Tok.getLoc();
5292   if (Tok.isNot(AsmToken::Identifier))
5293     return ParseStatus::NoMatch;
5294   StringRef ShiftName = Tok.getString();
5295   if (ShiftName != "ror" && ShiftName != "ROR")
5296     return ParseStatus::NoMatch;
5297   Parser.Lex(); // Eat the operator.
5298 
5299   // A '#' and a rotate amount.
5300   if (Parser.getTok().isNot(AsmToken::Hash) &&
5301       Parser.getTok().isNot(AsmToken::Dollar))
5302     return Error(Parser.getTok().getLoc(), "'#' expected");
5303   Parser.Lex(); // Eat hash token.
5304   SMLoc ExLoc = Parser.getTok().getLoc();
5305 
5306   const MCExpr *ShiftAmount;
5307   SMLoc EndLoc;
5308   if (getParser().parseExpression(ShiftAmount, EndLoc))
5309     return Error(ExLoc, "malformed rotate expression");
5310   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5311   if (!CE)
5312     return Error(ExLoc, "rotate amount must be an immediate");
5313 
5314   int64_t Val = CE->getValue();
  // Rotate amount must be in {0, 8, 16, 24}. Zero is an undocumented
  // extension; normally zero is represented in asm by omitting the rotate
  // operand entirely.
5318   if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5319     return Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5320 
5321   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5322 
5323   return ParseStatus::Success;
5324 }
5325 
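/// parseModImm - Parse a modified (rotated 8-bit) immediate operand, either
/// as a plain encodable value, e.g. the "#0xff0000" in "mov r0, #0xff0000",
/// or as an explicit (#bits, #rot) pair, e.g. the "#255, #8" in
/// "mov r0, #255, #8".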
5326 ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
5327   MCAsmParser &Parser = getParser();
5328   MCAsmLexer &Lexer = getLexer();
5329   int64_t Imm1, Imm2;
5330 
5331   SMLoc S = Parser.getTok().getLoc();
5332 
5333   // 1) A mod_imm operand can appear in the place of a register name:
5334   //   add r0, #mod_imm
5335   //   add r0, r0, #mod_imm
5336   // to correctly handle the latter, we bail out as soon as we see an
5337   // identifier.
5338   //
5339   // 2) Similarly, we do not want to parse into complex operands:
5340   //   mov r0, #mod_imm
5341   //   mov r0, :lower16:(_foo)
5342   if (Parser.getTok().is(AsmToken::Identifier) ||
5343       Parser.getTok().is(AsmToken::Colon))
5344     return ParseStatus::NoMatch;
5345 
5346   // Hash (dollar) is optional as per the ARMARM
5347   if (Parser.getTok().is(AsmToken::Hash) ||
5348       Parser.getTok().is(AsmToken::Dollar)) {
5349     // Avoid parsing into complex operands (#:)
5350     if (Lexer.peekTok().is(AsmToken::Colon))
5351       return ParseStatus::NoMatch;
5352 
5353     // Eat the hash (dollar)
5354     Parser.Lex();
5355   }
5356 
5357   SMLoc Sx1, Ex1;
5358   Sx1 = Parser.getTok().getLoc();
5359   const MCExpr *Imm1Exp;
5360   if (getParser().parseExpression(Imm1Exp, Ex1))
5361     return Error(Sx1, "malformed expression");
5362 
5363   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5364 
5365   if (CE) {
    // Immediate must fit within 32 bits
5367     Imm1 = CE->getValue();
5368     int Enc = ARM_AM::getSOImmVal(Imm1);
5369     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5370       // We have a match!
5371       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5372                                                   (Enc & 0xF00) >> 7,
5373                                                   Sx1, Ex1));
5374       return ParseStatus::Success;
5375     }
5376 
5377     // We have parsed an immediate which is not for us, fallback to a plain
    // immediate. This can happen for instruction aliases. For example,
5379     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5380     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5381     // instruction with a mod_imm operand. The alias is defined such that the
5382     // parser method is shared, that's why we have to do this here.
5383     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5384       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5385       return ParseStatus::Success;
5386     }
5387   } else {
5388     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5389     // MCFixup). Fallback to a plain immediate.
5390     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5391     return ParseStatus::Success;
5392   }
5393 
5394   // From this point onward, we expect the input to be a (#bits, #rot) pair
5395   if (Parser.getTok().isNot(AsmToken::Comma))
5396     return Error(Sx1,
5397                  "expected modified immediate operand: #[0, 255], #even[0-30]");
5398 
5399   if (Imm1 & ~0xFF)
    return Error(Sx1,
                 "immediate operand must be a number in the range [0, 255]");
5401 
5402   // Eat the comma
5403   Parser.Lex();
5404 
5405   // Repeat for #rot
5406   SMLoc Sx2, Ex2;
5407   Sx2 = Parser.getTok().getLoc();
5408 
5409   // Eat the optional hash (dollar)
5410   if (Parser.getTok().is(AsmToken::Hash) ||
5411       Parser.getTok().is(AsmToken::Dollar))
5412     Parser.Lex();
5413 
5414   const MCExpr *Imm2Exp;
5415   if (getParser().parseExpression(Imm2Exp, Ex2))
5416     return Error(Sx2, "malformed expression");
5417 
5418   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5419 
5420   if (CE) {
5421     Imm2 = CE->getValue();
5422     if (!(Imm2 & ~0x1E)) {
5423       // We have a match!
5424       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5425       return ParseStatus::Success;
5426     }
    return Error(Sx2, "immediate operand must be an even number in the "
                      "range [0, 30]");
5429   } else {
5430     return Error(Sx2, "constant expression expected");
5431   }
5432 }
5433 
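/// parseBitfield - Parse the lsb/width operand pair of the BFC/BFI
/// instructions, e.g. the "#4, #8" in "bfi r0, r1, #4, #8" or the "#0, #16"
/// in "bfc r2, #0, #16".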
5434 ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
5435   MCAsmParser &Parser = getParser();
5436   SMLoc S = Parser.getTok().getLoc();
5437   // The bitfield descriptor is really two operands, the LSB and the width.
5438   if (Parser.getTok().isNot(AsmToken::Hash) &&
5439       Parser.getTok().isNot(AsmToken::Dollar))
5440     return Error(Parser.getTok().getLoc(), "'#' expected");
5441   Parser.Lex(); // Eat hash token.
5442 
5443   const MCExpr *LSBExpr;
5444   SMLoc E = Parser.getTok().getLoc();
5445   if (getParser().parseExpression(LSBExpr))
5446     return Error(E, "malformed immediate expression");
5447   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
5448   if (!CE)
5449     return Error(E, "'lsb' operand must be an immediate");
5450 
5451   int64_t LSB = CE->getValue();
5452   // The LSB must be in the range [0,31]
5453   if (LSB < 0 || LSB > 31)
5454     return Error(E, "'lsb' operand must be in the range [0,31]");
5455   E = Parser.getTok().getLoc();
5456 
5457   // Expect another immediate operand.
5458   if (Parser.getTok().isNot(AsmToken::Comma))
5459     return Error(Parser.getTok().getLoc(), "too few operands");
  Parser.Lex(); // Eat comma token.
5461   if (Parser.getTok().isNot(AsmToken::Hash) &&
5462       Parser.getTok().isNot(AsmToken::Dollar))
5463     return Error(Parser.getTok().getLoc(), "'#' expected");
5464   Parser.Lex(); // Eat hash token.
5465 
5466   const MCExpr *WidthExpr;
5467   SMLoc EndLoc;
5468   if (getParser().parseExpression(WidthExpr, EndLoc))
5469     return Error(E, "malformed immediate expression");
5470   CE = dyn_cast<MCConstantExpr>(WidthExpr);
5471   if (!CE)
5472     return Error(E, "'width' operand must be an immediate");
5473 
5474   int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
5476   if (Width < 1 || Width > 32 - LSB)
5477     return Error(E, "'width' operand must be in the range [1,32-lsb]");
5478 
5479   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
5480 
5481   return ParseStatus::Success;
5482 }
5483 
5484 ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5485   // Check for a post-index addressing register operand. Specifically:
5486   // postidx_reg := '+' register {, shift}
5487   //              | '-' register {, shift}
5488   //              | register {, shift}
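  //   e.g. the "r2, lsl #2" in "str r0, [r1], r2, lsl #2" or the "-r2" in
  //   "ldr r0, [r1], -r2"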
5489 
5490   // This method must return ParseStatus::NoMatch without consuming any tokens
5491   // in the case where there is no match, as other alternatives take other
5492   // parse methods.
5493   MCAsmParser &Parser = getParser();
5494   AsmToken Tok = Parser.getTok();
5495   SMLoc S = Tok.getLoc();
5496   bool haveEaten = false;
5497   bool isAdd = true;
5498   if (Tok.is(AsmToken::Plus)) {
5499     Parser.Lex(); // Eat the '+' token.
5500     haveEaten = true;
5501   } else if (Tok.is(AsmToken::Minus)) {
5502     Parser.Lex(); // Eat the '-' token.
5503     isAdd = false;
5504     haveEaten = true;
5505   }
5506 
5507   SMLoc E = Parser.getTok().getEndLoc();
5508   int Reg = tryParseRegister();
5509   if (Reg == -1) {
5510     if (!haveEaten)
5511       return ParseStatus::NoMatch;
5512     return Error(Parser.getTok().getLoc(), "register expected");
5513   }
5514 
5515   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5516   unsigned ShiftImm = 0;
5517   if (Parser.getTok().is(AsmToken::Comma)) {
5518     Parser.Lex(); // Eat the ','.
5519     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5520       return ParseStatus::Failure;
5521 
5522     // FIXME: Only approximates end...may include intervening whitespace.
5523     E = Parser.getTok().getLoc();
5524   }
5525 
5526   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5527                                                   ShiftImm, S, E));
5528 
5529   return ParseStatus::Success;
5530 }
5531 
5532 ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5533   // Check for a post-index addressing register operand. Specifically:
5534   // am3offset := '+' register
5535   //              | '-' register
5536   //              | register
5537   //              | # imm
5538   //              | # + imm
5539   //              | # - imm
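  //   e.g. the "#4" in "ldrd r0, r1, [r2], #4" or the "-r3" in
  //   "ldrh r0, [r1], -r3"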
5540 
5541   // This method must return ParseStatus::NoMatch without consuming any tokens
5542   // in the case where there is no match, as other alternatives take other
5543   // parse methods.
5544   MCAsmParser &Parser = getParser();
5545   AsmToken Tok = Parser.getTok();
5546   SMLoc S = Tok.getLoc();
5547 
5548   // Do immediates first, as we always parse those if we have a '#'.
5549   if (Parser.getTok().is(AsmToken::Hash) ||
5550       Parser.getTok().is(AsmToken::Dollar)) {
5551     Parser.Lex(); // Eat '#' or '$'.
5552     // Explicitly look for a '-', as we need to encode negative zero
5553     // differently.
5554     bool isNegative = Parser.getTok().is(AsmToken::Minus);
5555     const MCExpr *Offset;
5556     SMLoc E;
5557     if (getParser().parseExpression(Offset, E))
5558       return ParseStatus::Failure;
5559     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5560     if (!CE)
5561       return Error(S, "constant expression expected");
5562     // Negative zero is encoded as the flag value
5563     // std::numeric_limits<int32_t>::min().
5564     int32_t Val = CE->getValue();
5565     if (isNegative && Val == 0)
5566       Val = std::numeric_limits<int32_t>::min();
5567 
5568     Operands.push_back(
5569       ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
5570 
5571     return ParseStatus::Success;
5572   }
5573 
5574   bool haveEaten = false;
5575   bool isAdd = true;
5576   if (Tok.is(AsmToken::Plus)) {
5577     Parser.Lex(); // Eat the '+' token.
5578     haveEaten = true;
5579   } else if (Tok.is(AsmToken::Minus)) {
5580     Parser.Lex(); // Eat the '-' token.
5581     isAdd = false;
5582     haveEaten = true;
5583   }
5584 
5585   Tok = Parser.getTok();
5586   int Reg = tryParseRegister();
5587   if (Reg == -1) {
5588     if (!haveEaten)
5589       return ParseStatus::NoMatch;
5590     return Error(Tok.getLoc(), "register expected");
5591   }
5592 
5593   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
5594                                                   0, S, Tok.getEndLoc()));
5595 
5596   return ParseStatus::Success;
5597 }
5598 
5599 /// Convert parsed operands to MCInst.  Needed here because this instruction
5600 /// only has two register operands, but multiplication is commutative so
5601 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
5602 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5603                                     const OperandVector &Operands) {
5604   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
5605   ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
5606   // If we have a three-operand form, make sure to set Rn to be the operand
5607   // that isn't the same as Rd.
5608   unsigned RegOp = 4;
5609   if (Operands.size() == 6 &&
5610       ((ARMOperand &)*Operands[4]).getReg() ==
5611           ((ARMOperand &)*Operands[3]).getReg())
5612     RegOp = 5;
5613   ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
5614   Inst.addOperand(Inst.getOperand(0));
5615   ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
5616 }
5617 
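/// Fix up a parsed Thumb branch (B/Bcc): select between the conditional and
/// unconditional opcodes based on IT-block context, and between the 16-bit
/// and 32-bit encodings based on the range of the branch target.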
5618 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5619                                     const OperandVector &Operands) {
5620   int CondOp = -1, ImmOp = -1;
5621   switch(Inst.getOpcode()) {
5622     case ARM::tB:
5623     case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
5624 
5625     case ARM::t2B:
5626     case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
5627 
5628     default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
5629   }
  // First decide whether or not the branch should be conditional
  // by looking at its location relative to an IT block.
  if (inITBlock()) {
    // Inside an IT block we cannot have any conditional branches; any
    // such instruction needs to be converted to unconditional form.
5635     switch(Inst.getOpcode()) {
5636       case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
5637       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
5638     }
5639   } else {
    // Outside IT blocks we can only have unconditional branches with the AL
    // condition code or conditional branches with a non-AL condition code.
5642     unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
5643     switch(Inst.getOpcode()) {
5644       case ARM::tB:
5645       case ARM::tBcc:
5646         Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
5647         break;
5648       case ARM::t2B:
5649       case ARM::t2Bcc:
5650         Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
5651         break;
5652     }
5653   }
5654 
5655   // now decide on encoding size based on branch target range
5656   switch(Inst.getOpcode()) {
5657     // classify tB as either t2B or t1B based on range of immediate operand
5658     case ARM::tB: {
5659       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5660       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
5661         Inst.setOpcode(ARM::t2B);
5662       break;
5663     }
5664     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
5665     case ARM::tBcc: {
5666       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5667       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
5668         Inst.setOpcode(ARM::t2Bcc);
5669       break;
5670     }
5671   }
5672   ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
5673   ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
5674 }
5675 
5676 void ARMAsmParser::cvtMVEVMOVQtoDReg(
5677   MCInst &Inst, const OperandVector &Operands) {
5678 
5679   // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
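  // e.g. "vmov r0, r1, q0[3], q0[1]"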
5680   assert(Operands.size() == 8);
5681 
5682   ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
5683   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
5684   ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
5685   ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
5686   // skip second copy of Qd in Operands[6]
5687   ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
5688   ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
5689 }
5690 
/// Parse an ARM memory expression. Returns false on success; otherwise emits a
/// diagnostic and returns true. The first token must be a '[' when called.
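/// Accepted forms include, for example: [r0], [r0, #4], [r1, r2, lsl #2] and
/// [r0:128], each optionally followed by a '!' writeback marker.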
5693 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5694   MCAsmParser &Parser = getParser();
5695   SMLoc S, E;
5696   if (Parser.getTok().isNot(AsmToken::LBrac))
5697     return TokError("Token is not a Left Bracket");
5698   S = Parser.getTok().getLoc();
5699   Parser.Lex(); // Eat left bracket token.
5700 
5701   const AsmToken &BaseRegTok = Parser.getTok();
5702   int BaseRegNum = tryParseRegister();
5703   if (BaseRegNum == -1)
5704     return Error(BaseRegTok.getLoc(), "register expected");
5705 
5706   // The next token must either be a comma, a colon or a closing bracket.
5707   const AsmToken &Tok = Parser.getTok();
5708   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5709       !Tok.is(AsmToken::RBrac))
5710     return Error(Tok.getLoc(), "malformed memory operand");
5711 
5712   if (Tok.is(AsmToken::RBrac)) {
5713     E = Tok.getEndLoc();
5714     Parser.Lex(); // Eat right bracket token.
5715 
5716     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5717                                              ARM_AM::no_shift, 0, 0, false,
5718                                              S, E));
5719 
5720     // If there's a pre-indexing writeback marker, '!', just add it as a token
5721     // operand. It's rather odd, but syntactically valid.
5722     if (Parser.getTok().is(AsmToken::Exclaim)) {
5723       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5724       Parser.Lex(); // Eat the '!'.
5725     }
5726 
5727     return false;
5728   }
5729 
5730   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5731          "Lost colon or comma in memory operand?!");
5732   if (Tok.is(AsmToken::Comma)) {
5733     Parser.Lex(); // Eat the comma.
5734   }
5735 
5736   // If we have a ':', it's an alignment specifier.
5737   if (Parser.getTok().is(AsmToken::Colon)) {
5738     Parser.Lex(); // Eat the ':'.
5739     E = Parser.getTok().getLoc();
5740     SMLoc AlignmentLoc = Tok.getLoc();
5741 
5742     const MCExpr *Expr;
5743     if (getParser().parseExpression(Expr))
5744      return true;
5745 
5746     // The expression has to be a constant. Memory references with relocations
5747     // don't come through here, as they use the <label> forms of the relevant
5748     // instructions.
5749     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5750     if (!CE)
5751       return Error (E, "constant expression expected");
5752 
5753     unsigned Align = 0;
5754     switch (CE->getValue()) {
5755     default:
5756       return Error(E,
5757                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5758     case 16:  Align = 2; break;
5759     case 32:  Align = 4; break;
5760     case 64:  Align = 8; break;
5761     case 128: Align = 16; break;
5762     case 256: Align = 32; break;
5763     }
5764 
5765     // Now we should have the closing ']'
5766     if (Parser.getTok().isNot(AsmToken::RBrac))
5767       return Error(Parser.getTok().getLoc(), "']' expected");
5768     E = Parser.getTok().getEndLoc();
5769     Parser.Lex(); // Eat right bracket token.
5770 
5771     // Don't worry about range checking the value here. That's handled by
5772     // the is*() predicates.
5773     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5774                                              ARM_AM::no_shift, 0, Align,
5775                                              false, S, E, AlignmentLoc));
5776 
5777     // If there's a pre-indexing writeback marker, '!', just add it as a token
5778     // operand.
5779     if (Parser.getTok().is(AsmToken::Exclaim)) {
5780       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5781       Parser.Lex(); // Eat the '!'.
5782     }
5783 
5784     return false;
5785   }
5786 
5787   // If we have a '#' or '$', it's an immediate offset, else assume it's a
5788   // register offset. Be friendly and also accept a plain integer or expression
5789   // (without a leading hash) for gas compatibility.
5790   if (Parser.getTok().is(AsmToken::Hash) ||
5791       Parser.getTok().is(AsmToken::Dollar) ||
5792       Parser.getTok().is(AsmToken::LParen) ||
5793       Parser.getTok().is(AsmToken::Integer)) {
5794     if (Parser.getTok().is(AsmToken::Hash) ||
5795         Parser.getTok().is(AsmToken::Dollar))
5796       Parser.Lex(); // Eat '#' or '$'
5797     E = Parser.getTok().getLoc();
5798 
5799     bool isNegative = getParser().getTok().is(AsmToken::Minus);
5800     const MCExpr *Offset, *AdjustedOffset;
5801     if (getParser().parseExpression(Offset))
5802      return true;
5803 
5804     if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
5805       // If the constant was #-0, represent it as
5806       // std::numeric_limits<int32_t>::min().
5807       int32_t Val = CE->getValue();
5808       if (isNegative && Val == 0)
5809         CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5810                                     getContext());
5811       // Don't worry about range checking the value here. That's handled by
5812       // the is*() predicates.
5813       AdjustedOffset = CE;
5814     } else
5815       AdjustedOffset = Offset;
5816     Operands.push_back(ARMOperand::CreateMem(
5817         BaseRegNum, AdjustedOffset, 0, ARM_AM::no_shift, 0, 0, false, S, E));
5818 
5819     // Now we should have the closing ']'
5820     if (Parser.getTok().isNot(AsmToken::RBrac))
5821       return Error(Parser.getTok().getLoc(), "']' expected");
5822     E = Parser.getTok().getEndLoc();
5823     Parser.Lex(); // Eat right bracket token.
5824 
5825     // If there's a pre-indexing writeback marker, '!', just add it as a token
5826     // operand.
5827     if (Parser.getTok().is(AsmToken::Exclaim)) {
5828       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5829       Parser.Lex(); // Eat the '!'.
5830     }
5831 
5832     return false;
5833   }
5834 
5835   // The register offset is optionally preceded by a '+' or '-'
5836   bool isNegative = false;
5837   if (Parser.getTok().is(AsmToken::Minus)) {
5838     isNegative = true;
5839     Parser.Lex(); // Eat the '-'.
5840   } else if (Parser.getTok().is(AsmToken::Plus)) {
5841     // Nothing to do.
5842     Parser.Lex(); // Eat the '+'.
5843   }
5844 
5845   E = Parser.getTok().getLoc();
5846   int OffsetRegNum = tryParseRegister();
5847   if (OffsetRegNum == -1)
5848     return Error(E, "register expected");
5849 
5850   // If there's a shift operator, handle it.
5851   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5852   unsigned ShiftImm = 0;
5853   if (Parser.getTok().is(AsmToken::Comma)) {
5854     Parser.Lex(); // Eat the ','.
5855     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5856       return true;
5857   }
5858 
5859   // Now we should have the closing ']'
5860   if (Parser.getTok().isNot(AsmToken::RBrac))
5861     return Error(Parser.getTok().getLoc(), "']' expected");
5862   E = Parser.getTok().getEndLoc();
5863   Parser.Lex(); // Eat right bracket token.
5864 
5865   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5866                                            ShiftType, ShiftImm, 0, isNegative,
5867                                            S, E));
5868 
5869   // If there's a pre-indexing writeback marker, '!', just add it as a token
5870   // operand.
5871   if (Parser.getTok().is(AsmToken::Exclaim)) {
5872     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5873     Parser.Lex(); // Eat the '!'.
5874   }
5875 
5876   return false;
5877 }
5878 
/// parseMemRegOffsetShift - Parse one of these two forms:
///   ( lsl | lsr | asr | ror | uxtw ) , # shift_amount
///   rrx
/// Returns false if a shift was parsed successfully, true otherwise (in which
/// case an error has been emitted).
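/// For example, after the offset register of a memory operand this accepts
/// shifts such as 'lsl #2', 'asr #32', or a bare 'rrx'.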
5883 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5884                                           unsigned &Amount) {
5885   MCAsmParser &Parser = getParser();
5886   SMLoc Loc = Parser.getTok().getLoc();
5887   const AsmToken &Tok = Parser.getTok();
5888   if (Tok.isNot(AsmToken::Identifier))
5889     return Error(Loc, "illegal shift operator");
5890   StringRef ShiftName = Tok.getString();
5891   if (ShiftName == "lsl" || ShiftName == "LSL" ||
5892       ShiftName == "asl" || ShiftName == "ASL")
5893     St = ARM_AM::lsl;
5894   else if (ShiftName == "lsr" || ShiftName == "LSR")
5895     St = ARM_AM::lsr;
5896   else if (ShiftName == "asr" || ShiftName == "ASR")
5897     St = ARM_AM::asr;
5898   else if (ShiftName == "ror" || ShiftName == "ROR")
5899     St = ARM_AM::ror;
5900   else if (ShiftName == "rrx" || ShiftName == "RRX")
5901     St = ARM_AM::rrx;
5902   else if (ShiftName == "uxtw" || ShiftName == "UXTW")
5903     St = ARM_AM::uxtw;
5904   else
5905     return Error(Loc, "illegal shift operator");
5906   Parser.Lex(); // Eat shift type token.
5907 
5908   // rrx stands alone.
5909   Amount = 0;
5910   if (St != ARM_AM::rrx) {
5911     Loc = Parser.getTok().getLoc();
5912     // A '#' and a shift amount.
5913     const AsmToken &HashTok = Parser.getTok();
5914     if (HashTok.isNot(AsmToken::Hash) &&
5915         HashTok.isNot(AsmToken::Dollar))
5916       return Error(HashTok.getLoc(), "'#' expected");
5917     Parser.Lex(); // Eat hash token.
5918 
5919     const MCExpr *Expr;
5920     if (getParser().parseExpression(Expr))
5921       return true;
5922     // Range check the immediate.
5923     // lsl, ror: 0 <= imm <= 31
5924     // lsr, asr: 0 <= imm <= 32
5925     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5926     if (!CE)
5927       return Error(Loc, "shift amount must be an immediate");
5928     int64_t Imm = CE->getValue();
5929     if (Imm < 0 ||
5930         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5931         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5932       return Error(Loc, "immediate shift value out of range");
    // If <ShiftTy> #0, canonicalize it to lsl #0 (the no-shift form).
5934     if (Imm == 0)
5935       St = ARM_AM::lsl;
5936     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5937     if (Imm == 32)
5938       Imm = 0;
5939     Amount = Imm;
5940   }
5941 
5942   return false;
5943 }
5944 
5945 /// parseFPImm - A floating point immediate expression operand.
5946 ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
5947   MCAsmParser &Parser = getParser();
5948   // Anything that can accept a floating point constant as an operand
5949   // needs to go through here, as the regular parseExpression is
5950   // integer only.
5951   //
5952   // This routine still creates a generic Immediate operand, containing
5953   // a bitcast of the 64-bit floating point value. The various operands
5954   // that accept floats can check whether the value is valid for them
5955   // via the standard is*() predicates.
5956 
5957   SMLoc S = Parser.getTok().getLoc();
5958 
5959   if (Parser.getTok().isNot(AsmToken::Hash) &&
5960       Parser.getTok().isNot(AsmToken::Dollar))
5961     return ParseStatus::NoMatch;
5962 
5963   // Disambiguate the VMOV forms that can accept an FP immediate.
5964   // vmov.f32 <sreg>, #imm
5965   // vmov.f64 <dreg>, #imm
5966   // vmov.f32 <dreg>, #imm  @ vector f32x2
5967   // vmov.f32 <qreg>, #imm  @ vector f32x4
5968   //
5969   // There are also the NEON VMOV instructions which expect an
5970   // integer constant. Make sure we don't try to parse an FPImm
5971   // for these:
5972   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5973   ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5974   bool isVmovf = TyOp.isToken() &&
5975                  (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5976                   TyOp.getToken() == ".f16");
5977   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5978   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5979                                          Mnemonic.getToken() == "fconsts");
5980   if (!(isVmovf || isFconst))
5981     return ParseStatus::NoMatch;
5982 
5983   Parser.Lex(); // Eat '#' or '$'.
5984 
5985   // Handle negation, as that still comes through as a separate token.
5986   bool isNegative = false;
5987   if (Parser.getTok().is(AsmToken::Minus)) {
5988     isNegative = true;
5989     Parser.Lex();
5990   }
5991   const AsmToken &Tok = Parser.getTok();
5992   SMLoc Loc = Tok.getLoc();
5993   if (Tok.is(AsmToken::Real) && isVmovf) {
5994     APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5995     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5996     // If we had a '-' in front, toggle the sign bit.
5997     IntVal ^= (uint64_t)isNegative << 31;
5998     Parser.Lex(); // Eat the token.
5999     Operands.push_back(ARMOperand::CreateImm(
6000           MCConstantExpr::create(IntVal, getContext()),
6001           S, Parser.getTok().getLoc()));
6002     return ParseStatus::Success;
6003   }
6004   // Also handle plain integers. Instructions which allow floating point
6005   // immediates also allow a raw encoded 8-bit value.
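  // e.g. 'fconsts s0, #112' supplies the raw 8-bit encoded immediate, which is
  // decoded back to a float via getFPImmFloat (112 is just an illustrative
  // in-range value).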
6006   if (Tok.is(AsmToken::Integer) && isFconst) {
6007     int64_t Val = Tok.getIntVal();
6008     Parser.Lex(); // Eat the token.
6009     if (Val > 255 || Val < 0)
6010       return Error(Loc, "encoded floating point value out of range");
6011     float RealVal = ARM_AM::getFPImmFloat(Val);
6012     Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
6013 
6014     Operands.push_back(ARMOperand::CreateImm(
6015         MCConstantExpr::create(Val, getContext()), S,
6016         Parser.getTok().getLoc()));
6017     return ParseStatus::Success;
6018   }
6019 
6020   return Error(Loc, "invalid floating point immediate");
6021 }
6022 
/// Parse an ARM instruction operand.  For now this parses the operand
/// regardless of the mnemonic.
6025 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
6026   MCAsmParser &Parser = getParser();
6027   SMLoc S, E;
6028 
6029   // Check if the current operand has a custom associated parser, if so, try to
6030   // custom parse the operand, or fallback to the general approach.
6031   ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
6032   if (ResTy.isSuccess())
6033     return false;
6034   // If there wasn't a custom match, try the generic matcher below. Otherwise,
6035   // there was a match, but an error occurred, in which case, just return that
6036   // the operand parsing failed.
6037   if (ResTy.isFailure())
6038     return true;
6039 
6040   switch (getLexer().getKind()) {
6041   default:
6042     Error(Parser.getTok().getLoc(), "unexpected token in operand");
6043     return true;
6044   case AsmToken::Identifier: {
6045     // If we've seen a branch mnemonic, the next operand must be a label.  This
    // is true even if the label is a register name.  So "b r1" means branch to
    // label "r1".
6048     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
6049     if (!ExpectLabel) {
6050       if (!tryParseRegisterWithWriteBack(Operands))
6051         return false;
6052       int Res = tryParseShiftRegister(Operands);
6053       if (Res == 0) // success
6054         return false;
6055       else if (Res == -1) // irrecoverable error
6056         return true;
6057       // If this is VMRS, check for the apsr_nzcv operand.
6058       if (Mnemonic == "vmrs" &&
6059           Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
6060         S = Parser.getTok().getLoc();
6061         Parser.Lex();
6062         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
6063         return false;
6064       }
6065     }
6066 
    // Fall through for the Identifier case that is not a register or a
6068     // special name.
6069     [[fallthrough]];
6070   }
6071   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as branch targets
6073   case AsmToken::String:  // quoted label names.
6074   case AsmToken::Dot: {   // . as a branch target
6075     // This was not a register so parse other operands that start with an
6076     // identifier (like labels) as expressions and create them as immediates.
6077     const MCExpr *IdVal;
6078     S = Parser.getTok().getLoc();
6079     if (getParser().parseExpression(IdVal))
6080       return true;
6081     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6082     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
6083     return false;
6084   }
6085   case AsmToken::LBrac:
6086     return parseMemory(Operands);
6087   case AsmToken::LCurly:
6088     return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
6089   case AsmToken::Dollar:
6090   case AsmToken::Hash: {
6091     // #42 -> immediate
6092     // $ 42 -> immediate
6093     // $foo -> symbol name
6094     // $42 -> symbol name
6095     S = Parser.getTok().getLoc();
6096 
6097     // Favor the interpretation of $-prefixed operands as symbol names.
6098     // Cases where immediates are explicitly expected are handled by their
6099     // specific ParseMethod implementations.
6100     auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
6101     bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
6102                             (AdjacentToken.is(AsmToken::Identifier) ||
6103                              AdjacentToken.is(AsmToken::Integer));
6104     if (!ExpectIdentifier) {
6105       // Token is not part of identifier. Drop leading $ or # before parsing
6106       // expression.
6107       Parser.Lex();
6108     }
6109 
6110     if (Parser.getTok().isNot(AsmToken::Colon)) {
6111       bool IsNegative = Parser.getTok().is(AsmToken::Minus);
6112       const MCExpr *ImmVal;
6113       if (getParser().parseExpression(ImmVal))
6114         return true;
6115       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
6116       if (CE) {
6117         int32_t Val = CE->getValue();
6118         if (IsNegative && Val == 0)
6119           ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
6120                                           getContext());
6121       }
6122       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6123       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
6124 
6125       // There can be a trailing '!' on operands that we want as a separate
6126       // '!' Token operand. Handle that here. For example, the compatibility
6127       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
6128       if (Parser.getTok().is(AsmToken::Exclaim)) {
6129         Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
6130                                                    Parser.getTok().getLoc()));
6131         Parser.Lex(); // Eat exclaim token
6132       }
6133       return false;
6134     }
6135     // w/ a ':' after the '#', it's just like a plain ':'.
6136     [[fallthrough]];
6137   }
6138   case AsmToken::Colon: {
6139     S = Parser.getTok().getLoc();
6140     // ":lower16:", ":upper16:", ":lower0_7:", ":lower8_15:", ":upper0_7:" and
6141     // ":upper8_15:", expression prefixes
6142     // FIXME: Check it's an expression prefix,
6143     // e.g. (FOO - :lower16:BAR) isn't legal.
6144     ARMMCExpr::VariantKind RefKind;
6145     if (parsePrefix(RefKind))
6146       return true;
6147 
6148     const MCExpr *SubExprVal;
6149     if (getParser().parseExpression(SubExprVal))
6150       return true;
6151 
6152     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
6153                                               getContext());
6154     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6155     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
6156     return false;
6157   }
6158   case AsmToken::Equal: {
6159     S = Parser.getTok().getLoc();
6160     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
6161       return Error(S, "unexpected token in operand");
6162     Parser.Lex(); // Eat '='
6163     const MCExpr *SubExprVal;
6164     if (getParser().parseExpression(SubExprVal))
6165       return true;
6166     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
6167 
6168     // execute-only: we assume that assembly programmers know what they are
6169     // doing and allow literal pool creation here
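    // e.g. 'ldr r0, =0x12345678' or 'ldr r0, =sym' materializes the value,
    // typically via a literal pool load (or a simple move if the value turns
    // out to be directly encodable later on).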
6170     Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
6171     return false;
6172   }
6173   }
6174 }
6175 
6176 bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6177   const MCExpr *Expr = nullptr;
6178   SMLoc L = getParser().getTok().getLoc();
6179   if (check(getParser().parseExpression(Expr), L, "expected expression"))
6180     return true;
6181   const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6182   if (check(!Value, L, "expected constant expression"))
6183     return true;
6184   Out = Value->getValue();
6185   return false;
6186 }
6187 
// parsePrefix - Parse ARM 16-bit relocation expression prefixes, i.e.
// :lower16: and :upper16:, and Thumb 8-bit relocation expression prefixes, i.e.
6190 // :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7:
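// For example (illustrative): 'movw r0, #:lower16:sym' together with
// 'movt r0, #:upper16:sym' loads the 32-bit address of 'sym'.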
6191 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
6192   MCAsmParser &Parser = getParser();
6193   RefKind = ARMMCExpr::VK_ARM_None;
6194 
6195   // consume an optional '#' (GNU compatibility)
6196   if (getLexer().is(AsmToken::Hash))
6197     Parser.Lex();
6198 
6199   assert(getLexer().is(AsmToken::Colon) && "expected a :");
6200   Parser.Lex(); // Eat ':'
6201 
6202   if (getLexer().isNot(AsmToken::Identifier)) {
6203     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
6204     return true;
6205   }
6206 
6207   enum {
6208     COFF = (1 << MCContext::IsCOFF),
6209     ELF = (1 << MCContext::IsELF),
6210     MACHO = (1 << MCContext::IsMachO),
6211     WASM = (1 << MCContext::IsWasm),
6212   };
6213   static const struct PrefixEntry {
6214     const char *Spelling;
6215     ARMMCExpr::VariantKind VariantKind;
6216     uint8_t SupportedFormats;
6217   } PrefixEntries[] = {
6218       {"upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO},
6219       {"lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO},
6220       {"upper8_15", ARMMCExpr::VK_ARM_HI_8_15, ELF},
6221       {"upper0_7", ARMMCExpr::VK_ARM_HI_0_7, ELF},
6222       {"lower8_15", ARMMCExpr::VK_ARM_LO_8_15, ELF},
6223       {"lower0_7", ARMMCExpr::VK_ARM_LO_0_7, ELF},
6224   };
6225 
6226   StringRef IDVal = Parser.getTok().getIdentifier();
6227 
6228   const auto &Prefix =
6229       llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
6230         return PE.Spelling == IDVal;
6231       });
6232   if (Prefix == std::end(PrefixEntries)) {
6233     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
6234     return true;
6235   }
6236 
6237   uint8_t CurrentFormat;
6238   switch (getContext().getObjectFileType()) {
6239   case MCContext::IsMachO:
6240     CurrentFormat = MACHO;
6241     break;
6242   case MCContext::IsELF:
6243     CurrentFormat = ELF;
6244     break;
6245   case MCContext::IsCOFF:
6246     CurrentFormat = COFF;
6247     break;
6248   case MCContext::IsWasm:
6249     CurrentFormat = WASM;
6250     break;
6251   case MCContext::IsGOFF:
6252   case MCContext::IsSPIRV:
6253   case MCContext::IsXCOFF:
6254   case MCContext::IsDXContainer:
6255     llvm_unreachable("unexpected object format");
6256     break;
6257   }
6258 
6259   if (~Prefix->SupportedFormats & CurrentFormat) {
6260     Error(Parser.getTok().getLoc(),
6261           "cannot represent relocation in the current file format");
6262     return true;
6263   }
6264 
6265   RefKind = Prefix->VariantKind;
6266   Parser.Lex();
6267 
6268   if (getLexer().isNot(AsmToken::Colon)) {
6269     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
6270     return true;
6271   }
6272   Parser.Lex(); // Eat the last ':'
6273 
  // consume an optional trailing '#' (GNU compatibility)
6275   parseOptionalToken(AsmToken::Hash);
6276 
6277   return false;
6278 }
6279 
6280 /// Given a mnemonic, split out possible predication code and carry
6281 /// setting letters to form a canonical mnemonic and flags.
6282 //
6283 // FIXME: Would be nice to autogen this.
6284 // FIXME: This is a bit of a maze of special cases.
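// For example, 'addseq' splits into mnemonic 'add' with CarrySetting set and an
// EQ predication code, and 'cpsie' yields 'cps' with the IE imod value.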
6285 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
6286                                       StringRef ExtraToken,
6287                                       unsigned &PredicationCode,
6288                                       unsigned &VPTPredicationCode,
6289                                       bool &CarrySetting,
6290                                       unsigned &ProcessorIMod,
6291                                       StringRef &ITMask) {
6292   PredicationCode = ARMCC::AL;
6293   VPTPredicationCode = ARMVCC::None;
6294   CarrySetting = false;
6295   ProcessorIMod = 0;
6296 
6297   // Ignore some mnemonics we know aren't predicated forms.
6298   //
6299   // FIXME: Would be nice to autogen this.
6300   if ((Mnemonic == "movs" && isThumb()) ||
6301       Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
6302       Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
6303       Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
6304       Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
6305       Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
6306       Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
6307       Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
6308       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
6309       Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
6310       Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
6311       Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
6312       Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
6313       Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
6314       Mnemonic == "bxns"  || Mnemonic == "blxns" ||
6315       Mnemonic == "vdot"  || Mnemonic == "vmmla" ||
6316       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
6317       Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6318       Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
6319       Mnemonic == "wls"   || Mnemonic == "le"    || Mnemonic == "dls" ||
6320       Mnemonic == "csel"  || Mnemonic == "csinc" ||
6321       Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" ||
6322       Mnemonic == "cinv"  || Mnemonic == "cneg"  || Mnemonic == "cset" ||
6323       Mnemonic == "csetm" ||
6324       Mnemonic == "aut"   || Mnemonic == "pac" || Mnemonic == "pacbti" ||
6325       Mnemonic == "bti")
6326     return Mnemonic;
6327 
6328   // First, split out any predication code. Ignore mnemonics we know aren't
6329   // predicated but do have a carry-set and so weren't caught above.
6330   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
6331       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
6332       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
6333       Mnemonic != "sbcs" && Mnemonic != "rscs" &&
6334       !(hasMVE() &&
6335         (Mnemonic == "vmine" ||
6336          Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt" ||
6337          Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
6338          Mnemonic == "vmvne" || Mnemonic == "vorne" ||
6339          Mnemonic == "vnege" || Mnemonic == "vnegt" ||
6340          Mnemonic == "vmule" || Mnemonic == "vmult" ||
6341          Mnemonic == "vrintne" ||
6342          Mnemonic == "vcmult" || Mnemonic == "vcmule" ||
6343          Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
6344          Mnemonic.startswith("vq")))) {
6345     unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
6346     if (CC != ~0U) {
6347       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
6348       PredicationCode = CC;
6349     }
6350   }
6351 
6352   // Next, determine if we have a carry setting bit. We explicitly ignore all
6353   // the instructions we know end in 's'.
6354   if (Mnemonic.endswith("s") &&
6355       !(Mnemonic == "cps" || Mnemonic == "mls" ||
6356         Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
6357         Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
6358         Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
6359         Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
6360         Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
6361         Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
6362         Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
6363         Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
6364         Mnemonic == "bxns" || Mnemonic == "blxns" || Mnemonic == "vfmas" ||
6365         Mnemonic == "vmlas" ||
6366         (Mnemonic == "movs" && isThumb()))) {
6367     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
6368     CarrySetting = true;
6369   }
6370 
6371   // The "cps" instruction can have a interrupt mode operand which is glued into
6372   // the mnemonic. Check if this is the case, split it and parse the imod op
6373   if (Mnemonic.startswith("cps")) {
6374     // Split out any imod code.
6375     unsigned IMod =
6376       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
6377       .Case("ie", ARM_PROC::IE)
6378       .Case("id", ARM_PROC::ID)
6379       .Default(~0U);
6380     if (IMod != ~0U) {
6381       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
6382       ProcessorIMod = IMod;
6383     }
6384   }
6385 
6386   if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
6387       Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
6388       Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
6389       Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
6390       Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
6391       Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
6392       Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
6393     unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
6394     if (CC != ~0U) {
6395       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
6396       VPTPredicationCode = CC;
6397     }
6398     return Mnemonic;
6399   }
6400 
6401   // The "it" instruction has the condition mask on the end of the mnemonic.
6402   if (Mnemonic.startswith("it")) {
6403     ITMask = Mnemonic.slice(2, Mnemonic.size());
6404     Mnemonic = Mnemonic.slice(0, 2);
6405   }
6406 
6407   if (Mnemonic.startswith("vpst")) {
6408     ITMask = Mnemonic.slice(4, Mnemonic.size());
6409     Mnemonic = Mnemonic.slice(0, 4);
6410   }
6411   else if (Mnemonic.startswith("vpt")) {
6412     ITMask = Mnemonic.slice(3, Mnemonic.size());
6413     Mnemonic = Mnemonic.slice(0, 3);
6414   }
6415 
6416   return Mnemonic;
6417 }
6418 
6419 /// Given a canonical mnemonic, determine if the instruction ever allows
6420 /// inclusion of carry set or predication code operands.
6421 //
6422 // FIXME: It would be nice to autogen this.
6423 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
6424                                          StringRef ExtraToken,
6425                                          StringRef FullInst,
6426                                          bool &CanAcceptCarrySet,
6427                                          bool &CanAcceptPredicationCode,
6428                                          bool &CanAcceptVPTPredicationCode) {
6429   CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6430 
6431   CanAcceptCarrySet =
6432       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6433       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
6434       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
6435       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
6436       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
6437       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
6438       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
6439       (!isThumb() &&
6440        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
6441         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
6442 
6443   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
6444       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
6445       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
6446       Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
6447       Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
6448       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
6449       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
6450       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
6451       Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
6452       Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
6453       (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
6454       Mnemonic == "vmovx" || Mnemonic == "vins" ||
6455       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
6456       Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6457       Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
6458       Mnemonic == "vfmat" || Mnemonic == "vfmab" ||
6459       Mnemonic == "vdot"  || Mnemonic == "vmmla" ||
6460       Mnemonic == "sb"    || Mnemonic == "ssbb"  ||
6461       Mnemonic == "pssbb" || Mnemonic == "vsmmla" ||
6462       Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
6463       Mnemonic == "vusdot" || Mnemonic == "vsudot" ||
6464       Mnemonic == "bfcsel" || Mnemonic == "wls" ||
6465       Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
6466       Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
6467       Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
6468       Mnemonic == "cset" || Mnemonic == "csetm" ||
6469       (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6470        !MS.isITPredicableCDEInstr(Mnemonic)) ||
6471       Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst") ||
6472       Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
6473       Mnemonic == "bti" ||
6474       (hasMVE() &&
6475        (Mnemonic.startswith("vst2") || Mnemonic.startswith("vld2") ||
6476         Mnemonic.startswith("vst4") || Mnemonic.startswith("vld4") ||
6477         Mnemonic.startswith("wlstp") || Mnemonic.startswith("dlstp") ||
6478         Mnemonic.startswith("letp")))) {
6479     // These mnemonics are never predicable
6480     CanAcceptPredicationCode = false;
6481   } else if (!isThumb()) {
6482     // Some instructions are only predicable in Thumb mode
6483     CanAcceptPredicationCode =
6484         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
6485         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
6486         Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
6487         Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
6488         Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
6489         Mnemonic != "stc2" && Mnemonic != "stc2l" &&
6490         Mnemonic != "tsb" &&
6491         !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
6492   } else if (isThumbOne()) {
6493     if (hasV6MOps())
6494       CanAcceptPredicationCode = Mnemonic != "movs";
6495     else
6496       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
6497   } else
6498     CanAcceptPredicationCode = true;
6499 }
6500 
// Some Thumb instructions have two-operand forms that are not available as
// three-operand forms; convert to the two-operand form if possible.
6503 //
6504 // FIXME: We would really like to be able to tablegen'erate this.
6505 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
6506                                                  bool CarrySetting,
6507                                                  OperandVector &Operands) {
6508   if (Operands.size() != 6)
6509     return;
6510 
6511   const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6512         auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
6513   if (!Op3.isReg() || !Op4.isReg())
6514     return;
6515 
6516   auto Op3Reg = Op3.getReg();
6517   auto Op4Reg = Op4.getReg();
6518 
6519   // For most Thumb2 cases we just generate the 3 operand form and reduce
6520   // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
6521   // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp, #imm' case.
6523   auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
6524   if (isThumbTwo()) {
6525     if (Mnemonic != "add")
6526       return;
6527     bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6528                         (Op5.isReg() && Op5.getReg() == ARM::PC);
6529     if (!TryTransform) {
6530       TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6531                       (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6532                      !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6533                        Op5.isImm() && !Op5.isImm0_508s4());
6534     }
6535     if (!TryTransform)
6536       return;
6537   } else if (!isThumbOne())
6538     return;
6539 
6540   if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
6541         Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6542         Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
6543         Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
6544     return;
6545 
  // If the first two operands of a three-operand instruction are the same,
  // transform to the two-operand version of the same instruction,
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'.
6549   bool Transform = Op3Reg == Op4Reg;
6550 
  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
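  // e.g. in Thumb1, 'ands r0, r1, r0' can be rewritten as 'ands r0, r1' by
  // swapping the two sources first.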
6554   const ARMOperand *LastOp = &Op5;
6555   bool Swap = false;
6556   if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6557       ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
6558        Mnemonic == "and" || Mnemonic == "eor" ||
6559        Mnemonic == "adc" || Mnemonic == "orr")) {
6560     Swap = true;
6561     LastOp = &Op4;
6562     Transform = true;
6563   }
6564 
6565   // If both registers are the same then remove one of them from
6566   // the operand list, with certain exceptions.
6567   if (Transform) {
6568     // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
6569     // 2 operand forms don't exist.
6570     if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
6571         LastOp->isReg())
6572       Transform = false;
6573 
6574     // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
6575     // 3-bits because the ARMARM says not to.
6576     if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
6577       Transform = false;
6578   }
6579 
6580   if (Transform) {
6581     if (Swap)
6582       std::swap(Op4, Op5);
6583     Operands.erase(Operands.begin() + 3);
6584   }
6585 }
6586 
// This function returns true if the operand is one of the following
// relocations: :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7:.
6589 static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp) {
6590   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
6591   if (!Op.isImm())
6592     return false;
6593   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6594   if (CE)
6595     return false;
6596   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6597   if (!E)
6598     return false;
6599   const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6600   if (ARM16Expr && (ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_8_15 ||
6601                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_0_7 ||
6602                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_8_15 ||
6603                     ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_0_7))
6604     return true;
6605   return false;
6606 }
6607 
6608 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
6609                                           OperandVector &Operands) {
6610   // FIXME: This is all horribly hacky. We really need a better way to deal
6611   // with optional operands like this in the matcher table.
6612 
6613   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
6614   // another does not. Specifically, the MOVW instruction does not. So we
6615   // special case it here and remove the defaulted (non-setting) cc_out
6616   // operand if that's the instruction we're trying to match.
6617   //
6618   // We do this as post-processing of the explicit operands rather than just
6619   // conditionally adding the cc_out in the first place because we need
6620   // to check the type of the parsed immediate operand.
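  // e.g. (illustrative, ARM mode) 'mov r0, #0xF00F': the immediate is not a
  // modified immediate but fits in 16 bits, so only MOVW (which has no cc_out
  // operand) can match.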
6621   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
6622       !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
6623       static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
6624       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6625     return true;
6626 
6627   if (Mnemonic == "movs" && Operands.size() > 3 && isThumb() &&
6628       isThumbI8Relocation(*Operands[3]))
6629     return true;
6630 
6631   // Register-register 'add' for thumb does not have a cc_out operand
6632   // when there are only two register operands.
6633   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
6634       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6635       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6636       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6637     return true;
6638   // Register-register 'add' for thumb does not have a cc_out operand
6639   // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
6640   // have to check the immediate range here since Thumb2 has a variant
6641   // that can handle a different range and has a cc_out operand.
6642   if (((isThumb() && Mnemonic == "add") ||
6643        (isThumbTwo() && Mnemonic == "sub")) &&
6644       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6645       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6646       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
6647       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6648       ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
6649        static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
6650     return true;
6651   // For Thumb2, add/sub immediate does not have a cc_out operand for the
6652   // imm0_4095 variant. That's the least-preferred variant when
6653   // selecting via the generic "add" mnemonic, so to know that we
6654   // should remove the cc_out operand, we have to explicitly check that
6655   // it's not one of the other variants. Ugh.
6656   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6657       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6658       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6659       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6660     // Nest conditions rather than one big 'if' statement for readability.
6661     //
6662     // If both registers are low, we're in an IT block, and the immediate is
6663     // in range, we should use encoding T1 instead, which has a cc_out.
6664     if (inITBlock() &&
6665         isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
6666         isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
6667         static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
6668       return false;
6669     // Check against T3. If the second register is the PC, this is an
6670     // alternate form of ADR, which uses encoding T4, so check for that too.
6671     if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
6672         (static_cast<ARMOperand &>(*Operands[5]).isT2SOImm() ||
6673          static_cast<ARMOperand &>(*Operands[5]).isT2SOImmNeg()))
6674       return false;
6675 
6676     // Otherwise, we use encoding T4, which does not have a cc_out
6677     // operand.
6678     return true;
6679   }
6680 
6681   // The thumb2 multiply instruction doesn't have a CCOut register, so
6682   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
6683   // use the 16-bit encoding or not.
6684   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
6685       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6686       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6687       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6688       static_cast<ARMOperand &>(*Operands[5]).isReg() &&
6689       // If the registers aren't low regs, the destination reg isn't the
6690       // same as one of the source regs, or the cc_out operand is zero
6691       // outside of an IT block, we have to use the 32-bit encoding, so
6692       // remove the cc_out operand.
6693       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6694        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6695        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
6696        !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6697                             static_cast<ARMOperand &>(*Operands[5]).getReg() &&
6698                         static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6699                             static_cast<ARMOperand &>(*Operands[4]).getReg())))
6700     return true;
6701 
6702   // Also check the 'mul' syntax variant that doesn't specify an explicit
6703   // destination register.
6704   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
6705       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6706       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6707       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
6709       // outside of an IT block, we have to use the 32-bit encoding, so
6710       // remove the cc_out operand.
6711       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6712        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6713        !inITBlock()))
6714     return true;
6715 
6716   // Register-register 'add/sub' for thumb does not have a cc_out operand
6717   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
6718   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
6719   // right, this will result in better diagnostics (which operand is off)
6720   // anyway.
6721   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
6722       (Operands.size() == 5 || Operands.size() == 6) &&
6723       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6724       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
6725       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6726       (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
6727        (Operands.size() == 6 &&
6728         static_cast<ARMOperand &>(*Operands[5]).isImm()))) {
6729     // Thumb2 (add|sub){s}{p}.w GPRnopc, sp, #{T2SOImm} has cc_out
6730     return (!(isThumbTwo() &&
6731               (static_cast<ARMOperand &>(*Operands[4]).isT2SOImm() ||
6732                static_cast<ARMOperand &>(*Operands[4]).isT2SOImmNeg())));
6733   }
  // FIXME: Should join all the thumb+thumb2 (add|sub) in a single if case
6735   // Thumb2 ADD r0, #4095 -> ADDW r0, r0, #4095 (T4)
6736   // Thumb2 SUB r0, #4095 -> SUBW r0, r0, #4095
6737   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6738       (Operands.size() == 5) &&
6739       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6740       static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::SP &&
6741       static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::PC &&
6742       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6743       static_cast<ARMOperand &>(*Operands[4]).isImm()) {
6744     const ARMOperand &IMM = static_cast<ARMOperand &>(*Operands[4]);
6745     if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
6746       return false; // add.w / sub.w
6747     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
6748       const int64_t Value = CE->getValue();
6749       // Thumb1 imm8 sub / add
6750       if ((Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(Value & 3)) &&
6751           isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()))
6752         return false;
6753       return true; // Thumb2 T4 addw / subw
6754     }
6755   }
6756   return false;
6757 }
6758 
6759 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
6760                                               OperandVector &Operands) {
6761   // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
6762   unsigned RegIdx = 3;
6763   if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
6764       Mnemonic == "vrintr") &&
6765       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
6766        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
6767     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6768         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
6769          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
6770       RegIdx = 4;
6771 
6772     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
6773         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6774              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
6775          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6776              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
6777       return true;
6778   }
6779   return false;
6780 }
6781 
6782 bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
6783                                                     OperandVector &Operands) {
6784   if (!hasMVE() || Operands.size() < 3)
6785     return true;
6786 
6787   if (Mnemonic.startswith("vld2") || Mnemonic.startswith("vld4") ||
6788       Mnemonic.startswith("vst2") || Mnemonic.startswith("vst4"))
6789     return true;
6790 
6791   if (Mnemonic.startswith("vctp") || Mnemonic.startswith("vpnot"))
6792     return false;
6793 
6794   if (Mnemonic.startswith("vmov") &&
6795       !(Mnemonic.startswith("vmovl") || Mnemonic.startswith("vmovn") ||
6796         Mnemonic.startswith("vmovx"))) {
6797     for (auto &Operand : Operands) {
6798       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6799           ((*Operand).isReg() &&
6800            (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6801              (*Operand).getReg()) ||
6802             ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6803               (*Operand).getReg())))) {
6804         return true;
6805       }
6806     }
6807     return false;
6808   } else {
6809     for (auto &Operand : Operands) {
6810       // We check the larger class QPR instead of just the legal class
6811       // MQPR, to more accurately report errors when using Q registers
6812       // outside of the allowed range.
6813       if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6814           (Operand->isReg() &&
6815            (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6816              Operand->getReg()))))
6817         return false;
6818     }
6819     return true;
6820   }
6821 }
6822 
6823 static bool isDataTypeToken(StringRef Tok) {
6824   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6825     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6826     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6827     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6828     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6829     Tok == ".f" || Tok == ".d";
6830 }
6831 
6832 // FIXME: This bit should probably be handled via an explicit match class
6833 // in the .td files that matches the suffix instead of having it be
6834 // a literal string token the way it is now.
6835 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6836   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
6837 }
6838 
6839 static void applyMnemonicAliases(StringRef &Mnemonic,
6840                                  const FeatureBitset &Features,
6841                                  unsigned VariantID);
6842 
6843 // The GNU assembler has aliases of ldrd and strd with the second register
6844 // omitted. We don't have a way to do that in tablegen, so fix it up here.
6845 //
6846 // We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
6848 // it. If we do find anything that prevents us from doing the transformation we
6849 // bail out, and let the assembly parser report an error on the instruction as
6850 // it is written.
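// For example, 'ldrd r0, [r2]' is completed to 'ldrd r0, r1, [r2]' by inserting
// the implied second destination register.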
6851 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6852                                      OperandVector &Operands) {
6853   if (Mnemonic != "ldrd" && Mnemonic != "strd")
6854     return;
6855   if (Operands.size() < 4)
6856     return;
6857 
6858   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6859   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6860 
6861   if (!Op2.isReg())
6862     return;
6863   if (!Op3.isGPRMem())
6864     return;
6865 
6866   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6867   if (!GPR.contains(Op2.getReg()))
6868     return;
6869 
6870   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6871   if (!isThumb() && (RtEncoding & 1)) {
    // In ARM mode, the registers must be from an aligned pair; this
    // restriction does not apply in Thumb mode.
6874     return;
6875   }
6876   if (Op2.getReg() == ARM::PC)
6877     return;
6878   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6879   if (!PairedReg || PairedReg == ARM::PC ||
6880       (PairedReg == ARM::SP && !hasV8Ops()))
6881     return;
6882 
6883   Operands.insert(
6884       Operands.begin() + 3,
6885       ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6886 }
6887 
// Dual-register instructions have the following syntax:
6889 // <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
6890 // This function tries to remove <Rdest+1> and replace <Rdest> with a pair
// operand. If the conversion fails, an error is diagnosed and the function
6892 // returns true.
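// For example (illustrative usage): in 'cx1d p0, r0, r1, #0' the r0, r1 pair
// is folded into the single dual-register operand R0_R1.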
6893 bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
6894                                             OperandVector &Operands) {
6895   assert(MS.isCDEDualRegInstr(Mnemonic));
6896   bool isPredicable =
6897       Mnemonic == "cx1da" || Mnemonic == "cx2da" || Mnemonic == "cx3da";
6898   size_t NumPredOps = isPredicable ? 1 : 0;
6899 
6900   if (Operands.size() <= 3 + NumPredOps)
6901     return false;
6902 
6903   StringRef Op2Diag(
6904       "operand must be an even-numbered register in the range [r0, r10]");
6905 
6906   const MCParsedAsmOperand &Op2 = *Operands[2 + NumPredOps];
6907   if (!Op2.isReg())
6908     return Error(Op2.getStartLoc(), Op2Diag);
6909 
6910   unsigned RNext;
6911   unsigned RPair;
6912   switch (Op2.getReg()) {
6913   default:
6914     return Error(Op2.getStartLoc(), Op2Diag);
6915   case ARM::R0:
6916     RNext = ARM::R1;
6917     RPair = ARM::R0_R1;
6918     break;
6919   case ARM::R2:
6920     RNext = ARM::R3;
6921     RPair = ARM::R2_R3;
6922     break;
6923   case ARM::R4:
6924     RNext = ARM::R5;
6925     RPair = ARM::R4_R5;
6926     break;
6927   case ARM::R6:
6928     RNext = ARM::R7;
6929     RPair = ARM::R6_R7;
6930     break;
6931   case ARM::R8:
6932     RNext = ARM::R9;
6933     RPair = ARM::R8_R9;
6934     break;
6935   case ARM::R10:
6936     RNext = ARM::R11;
6937     RPair = ARM::R10_R11;
6938     break;
6939   }
6940 
6941   const MCParsedAsmOperand &Op3 = *Operands[3 + NumPredOps];
6942   if (!Op3.isReg() || Op3.getReg() != RNext)
6943     return Error(Op3.getStartLoc(), "operand must be a consecutive register");
6944 
6945   Operands.erase(Operands.begin() + 3 + NumPredOps);
6946   Operands[2 + NumPredOps] =
6947       ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
6948   return false;
6949 }
6950 
6951 /// Parse an arm instruction mnemonic followed by its operands.
6952 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
6953                                     SMLoc NameLoc, OperandVector &Operands) {
6954   MCAsmParser &Parser = getParser();
6955 
6956   // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
6958   // The generic tblgen'erated code does this later, at the start of
6959   // MatchInstructionImpl(), but that's too late for aliases that include
6960   // any sort of suffix.
6961   const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6962   unsigned AssemblerDialect = getParser().getAssemblerDialect();
6963   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
6964 
6965   // First check for the ARM-specific .req directive.
6966   if (Parser.getTok().is(AsmToken::Identifier) &&
6967       Parser.getTok().getIdentifier().lower() == ".req") {
6968     parseDirectiveReq(Name, NameLoc);
6969     // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
6971     return true;
6972   }
6973 
6974   // Create the leading tokens for the mnemonic, split by '.' characters.
6975   size_t Start = 0, Next = Name.find('.');
6976   StringRef Mnemonic = Name.slice(Start, Next);
6977   StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
6978 
6979   // Split out the predication code and carry setting flag from the mnemonic.
6980   unsigned PredicationCode;
6981   unsigned VPTPredicationCode;
6982   unsigned ProcessorIMod;
6983   bool CarrySetting;
6984   StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode,
                           VPTPredicationCode, CarrySetting, ProcessorIMod,
                           ITMask);
6987 
6988   // In Thumb1, only the branch (B) instruction can be predicated.
6989   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6990     return Error(NameLoc, "conditional execution not supported in Thumb1");
6991   }
6992 
6993   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6994 
6995   // Handle the mask for IT and VPT instructions. In ARMOperand and
6996   // MCOperand, this is stored in a format independent of the
6997   // condition code: the lowest set bit indicates the end of the
  // encoding, and above that, a 1 bit indicates 'else', and a 0 bit
6999   // indicates 'then'. E.g.
7000   //    IT    -> 1000
7001   //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
7002   //    ITxy  -> xy10    (e.g. ITET -> 1010)
7003   //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
7004   // Note: See the ARM::PredBlockMask enum in
7005   //   /lib/Target/ARM/Utils/ARMBaseInfo.h
7006   if (Mnemonic == "it" || Mnemonic.startswith("vpt") ||
7007       Mnemonic.startswith("vpst")) {
    SMLoc Loc = Mnemonic == "it"
                    ? SMLoc::getFromPointer(NameLoc.getPointer() + 2)
                : Mnemonic == "vpt"
                    ? SMLoc::getFromPointer(NameLoc.getPointer() + 3)
                    : SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7011     if (ITMask.size() > 3) {
7012       if (Mnemonic == "it")
7013         return Error(Loc, "too many conditions on IT instruction");
7014       return Error(Loc, "too many conditions on VPT instruction");
7015     }
7016     unsigned Mask = 8;
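    // Walk the t/e suffixes from last to first, shifting the mask right for
    // each condition and setting the top bit for every 'e'.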
7017     for (char Pos : llvm::reverse(ITMask)) {
7018       if (Pos != 't' && Pos != 'e') {
7019         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
7020       }
7021       Mask >>= 1;
7022       if (Pos == 'e')
7023         Mask |= 8;
7024     }
7025     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7026   }
7027 
7028   // FIXME: This is all a pretty gross hack. We should automatically handle
7029   // optional operands like this via tblgen.
7030 
7031   // Next, add the CCOut and ConditionCode operands, if needed.
7032   //
7033   // For mnemonics which can ever incorporate a carry setting bit or predication
7034   // code, our matching model involves us always generating CCOut and
7035   // ConditionCode operands to match the mnemonic "as written" and then we let
7036   // the matcher deal with finding the right instruction or generating an
7037   // appropriate error.
7038   bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7039   getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7040                         CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7041 
7042   // If we had a carry-set on an instruction that can't do that, issue an
7043   // error.
7044   if (!CanAcceptCarrySet && CarrySetting) {
7045     return Error(NameLoc, "instruction '" + Mnemonic +
7046                  "' can not set flags, but 's' suffix specified");
7047   }
7048   // If we had a predication code on an instruction that can't do that, issue an
7049   // error.
7050   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7051     return Error(NameLoc, "instruction '" + Mnemonic +
7052                  "' is not predicable, but condition code specified");
7053   }
7054 
  // If we had a VPT predication code on an instruction that can't do that,
  // issue an error.
7057   if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7058     return Error(NameLoc, "instruction '" + Mnemonic +
7059                  "' is not VPT predicable, but VPT code T/E is specified");
7060   }
7061 
7062   // Add the carry setting operand, if necessary.
7063   if (CanAcceptCarrySet) {
7064     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7065     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7066                                                Loc));
7067   }
7068 
7069   // Add the predication code operand, if necessary.
7070   if (CanAcceptPredicationCode) {
7071     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7072                                       CarrySetting);
7073     Operands.push_back(ARMOperand::CreateCondCode(
7074                        ARMCC::CondCodes(PredicationCode), Loc));
7075   }
7076 
7077   // Add the VPT predication code operand, if necessary.
7078   // FIXME: We don't add them for the instructions filtered below as these can
7079   // have custom operands which need special parsing.  This parsing requires
7080   // the operand to be in the same place in the OperandVector as their
7081   // definition in tblgen.  Since these instructions may also have the
7082   // scalar predication operand we do not add the vector one and leave until
7083   // now to fix it up.
7084   if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" &&
7085       !Mnemonic.startswith("vcmp") &&
7086       !(Mnemonic.startswith("vcvt") && Mnemonic != "vcvta" &&
7087         Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7088     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7089                                       CarrySetting);
7090     Operands.push_back(ARMOperand::CreateVPTPred(
7091                          ARMVCC::VPTCodes(VPTPredicationCode), Loc));
7092   }
7093 
7094   // Add the processor imod operand, if necessary.
7095   if (ProcessorIMod) {
7096     Operands.push_back(ARMOperand::CreateImm(
7097           MCConstantExpr::create(ProcessorIMod, getContext()),
7098                                  NameLoc, NameLoc));
7099   } else if (Mnemonic == "cps" && isMClass()) {
7100     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7101   }
7102 
7103   // Add the remaining tokens in the mnemonic.
7104   while (Next != StringRef::npos) {
7105     Start = Next;
7106     Next = Name.find('.', Start + 1);
7107     ExtraToken = Name.slice(Start, Next);
7108 
7109     // Some NEON instructions have an optional datatype suffix that is
7110     // completely ignored. Check for that.
7111     if (isDataTypeToken(ExtraToken) &&
7112         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7113       continue;
7114 
    // In ARM mode, generate an error if the .n qualifier is used.
7116     if (ExtraToken == ".n" && !isThumb()) {
7117       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7118       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7119                    "arm mode");
7120     }
7121 
7122     // The .n qualifier is always discarded as that is what the tables
7123     // and matcher expect.  In ARM mode the .w qualifier has no effect,
7124     // so discard it to avoid errors that can be caused by the matcher.
7125     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7126       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7127       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7128     }
7129   }
7130 
7131   // Read the remaining operands.
7132   if (getLexer().isNot(AsmToken::EndOfStatement)) {
7133     // Read the first operand.
7134     if (parseOperand(Operands, Mnemonic)) {
7135       return true;
7136     }
7137 
7138     while (parseOptionalToken(AsmToken::Comma)) {
7139       // Parse and remember the operand.
7140       if (parseOperand(Operands, Mnemonic)) {
7141         return true;
7142       }
7143     }
7144   }
7145 
7146   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7147     return true;
7148 
7149   tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
7150 
7151   if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
    // Dual-register instructions use even-odd register pairs as their
    // destination operand; in assembly such a pair is spelled as two
    // consecutive registers, without any special syntax.
    // CDEConvertDualRegOperand tries to convert such an operand into a
    // register pair, e.g. r2, r3 -> r2_r3. It returns true if an error
    // message has been emitted. If it returns false, the conversion either
    // succeeded or an error (e.g. a missing operand) will be diagnosed
    // elsewhere.
7159     if (MS.isCDEDualRegInstr(Mnemonic)) {
7160       bool GotError = CDEConvertDualRegOperand(Mnemonic, Operands);
7161       if (GotError)
7162         return GotError;
7163     }
7164   }
7165 
7166   // Some instructions, mostly Thumb, have forms for the same mnemonic that
7167   // do and don't have a cc_out optional-def operand. With some spot-checks
7168   // of the operand list, we can figure out which variant we're trying to
7169   // parse and adjust accordingly before actually matching. We shouldn't ever
7170   // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason #317 why the
  // table-driven matcher doesn't fit well with the ARM instruction set.
7173   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
7174     Operands.erase(Operands.begin() + 1);
7175 
7176   // Some instructions have the same mnemonic, but don't always
7177   // have a predicate. Distinguish them here and delete the
7178   // appropriate predicate if needed.  This could be either the scalar
7179   // predication code or the vector predication code.
7180   if (PredicationCode == ARMCC::AL &&
7181       shouldOmitPredicateOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);

7185   if (hasMVE()) {
7186     if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
7187         Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
      // Very nasty hack to deal with the ambiguity between the vector
      // predicated variant of vmovlt and the scalar predicated vmov with
      // condition 'lt'.  We cannot tell them apart until we have parsed
      // their operands.
7191       Operands.erase(Operands.begin() + 1);
7192       Operands.erase(Operands.begin());
7193       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7194       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7195                                          Mnemonic.size() - 1 + CarrySetting);
7196       Operands.insert(Operands.begin(),
7197                       ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
7198       Operands.insert(Operands.begin(),
7199                       ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
7200     } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7201                !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7202       // Another nasty hack to deal with the ambiguity between vcvt with scalar
7203       // predication 'ne' and vcvtn with vector predication 'e'.  As above we
7204       // can only distinguish between the two after we have parsed their
7205       // operands.
7206       Operands.erase(Operands.begin() + 1);
7207       Operands.erase(Operands.begin());
7208       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7209       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7210                                          Mnemonic.size() - 1 + CarrySetting);
7211       Operands.insert(Operands.begin(),
7212                       ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
7213       Operands.insert(Operands.begin(),
7214                       ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
7215     } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7216                !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7217       // Another hack, this time to distinguish between scalar predicated vmul
7218       // with 'lt' predication code and the vector instruction vmullt with
7219       // vector predication code "none"
7220       Operands.erase(Operands.begin() + 1);
7221       Operands.erase(Operands.begin());
7222       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7223       Operands.insert(Operands.begin(),
7224                       ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
7225     }
7226     // For vmov and vcmp, as mentioned earlier, we did not add the vector
7227     // predication code, since these may contain operands that require
7228     // special parsing.  So now we have to see if they require vector
7229     // predication and replace the scalar one with the vector predication
7230     // operand if that is the case.
7231     else if (Mnemonic == "vmov" || Mnemonic.startswith("vcmp") ||
7232              (Mnemonic.startswith("vcvt") && !Mnemonic.startswith("vcvta") &&
7233               !Mnemonic.startswith("vcvtn") && !Mnemonic.startswith("vcvtp") &&
7234               !Mnemonic.startswith("vcvtm"))) {
7235       if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
        // We could not split the vector predicate off vcvt because it might
        // have been the scalar vcvtt instruction.  Now that we know it is a
        // vector instruction, we still need to check whether it is the
        // vector predicated vcvt with 'Then' predication or the vector
        // vcvtt.  We can distinguish the two based on the suffixes: if it is
        // any of ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is
        // the vcvtt.
7242         if (Mnemonic.startswith("vcvtt") && Operands.size() >= 4) {
          auto &Sz1 = static_cast<ARMOperand &>(*Operands[2]);
          auto &Sz2 = static_cast<ARMOperand &>(*Operands[3]);
7245           if (!(Sz1.isToken() && Sz1.getToken().startswith(".f") &&
7246               Sz2.isToken() && Sz2.getToken().startswith(".f"))) {
7247             Operands.erase(Operands.begin());
7248             SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7249             VPTPredicationCode = ARMVCC::Then;
7250 
7251             Mnemonic = Mnemonic.substr(0, 4);
7252             Operands.insert(Operands.begin(),
7253                             ARMOperand::CreateToken(Mnemonic, MLoc));
7254           }
7255         }
7256         Operands.erase(Operands.begin() + 1);
7257         SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7258                                           Mnemonic.size() + CarrySetting);
7259         Operands.insert(Operands.begin() + 1,
7260                         ARMOperand::CreateVPTPred(
7261                             ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
7262       }
7263     } else if (CanAcceptVPTPredicationCode) {
7264       // For all other instructions, make sure only one of the two
7265       // predication operands is left behind, depending on whether we should
7266       // use the vector predication.
7267       if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7268         if (CanAcceptPredicationCode)
7269           Operands.erase(Operands.begin() + 2);
7270         else
7271           Operands.erase(Operands.begin() + 1);
7272       } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
7273         Operands.erase(Operands.begin() + 1);
7274       }
7275     }
7276   }
7277 
7278   if (VPTPredicationCode != ARMVCC::None) {
7279     bool usedVPTPredicationCode = false;
7280     for (unsigned I = 1; I < Operands.size(); ++I)
7281       if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7282         usedVPTPredicationCode = true;
7283     if (!usedVPTPredicationCode) {
7284       // If we have a VPT predication code and we haven't just turned it
7285       // into an operand, then it was a mistake for splitMnemonic to
7286       // separate it from the rest of the mnemonic in the first place,
7287       // and this may lead to wrong disassembly (e.g. scalar floating
7288       // point VCMPE is actually a different instruction from VCMP, so
7289       // we mustn't treat them the same). In that situation, glue it
7290       // back on.
7291       Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7292       Operands.erase(Operands.begin());
7293       Operands.insert(Operands.begin(),
7294                       ARMOperand::CreateToken(Mnemonic, NameLoc));
7295     }
7296   }
7297 
  // ARM mode 'blx' needs special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand &>(*Operands[2]).isImm())
    Operands.erase(Operands.begin() + 1);

  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
  // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GPRs cannot be
  // automatically expressed as a GPRPair, so we have to manually merge them.
  // FIXME: We would really like to be able to tablegen'erate this.
  if (!isThumb() && Operands.size() > 4 &&
      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
       Mnemonic == "stlexd")) {
    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
    unsigned Idx = isLoad ? 2 : 3;
    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);

    const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
    // Adjust only if Op1 and Op2 are GPRs.
    if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
        MRC.contains(Op2.getReg())) {
      unsigned Reg1 = Op1.getReg();
      unsigned Reg2 = Op2.getReg();
      unsigned Rt = MRI->getEncodingValue(Reg1);
      unsigned Rt2 = MRI->getEncodingValue(Reg2);

      // Rt2 must be Rt + 1 and Rt must be even.
      if (Rt + 1 != Rt2 || (Rt & 1)) {
        return Error(Op2.getStartLoc(),
                     isLoad ? "destination operands must be sequential"
                            : "source operands must be sequential");
      }
      unsigned NewReg = MRI->getMatchingSuperReg(
          Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
      Operands[Idx] =
          ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
      Operands.erase(Operands.begin() + Idx + 1);
    }
  }
7344 
7345   // GNU Assembler extension (compatibility).
7346   fixupGNULDRDAlias(Mnemonic, Operands);
7347 
7348   // FIXME: As said above, this is all a pretty gross hack.  This instruction
7349   // does not fit with other "subs" and tblgen.
7350   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7351   // so the Mnemonic is the original name "subs" and delete the predicate
7352   // operand so it will match the table entry.
7353   if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
7354       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
7355       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
7356       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
7357       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
7358       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
7359     Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
7360     Operands.erase(Operands.begin() + 1);
7361   }
7362   return false;
7363 }
7364 
7365 // Validate context-sensitive operand constraints.
7366 
// Return 'true' if the register list starting at operand 'OpNo' contains a
// register that is neither a low GPR nor 'HiReg', 'false' otherwise. If 'Reg'
// is found in the register list, set 'containsReg' to true.
7370 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7371                                  unsigned Reg, unsigned HiReg,
7372                                  bool &containsReg) {
7373   containsReg = false;
7374   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7375     unsigned OpReg = Inst.getOperand(i).getReg();
7376     if (OpReg == Reg)
7377       containsReg = true;
7378     // Anything other than a low register isn't legal here.
7379     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7380       return true;
7381   }
7382   return false;
7383 }
7384 
// Check if the specified register is in the register list of the inst,
7386 // starting at the indicated operand number.
7387 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
7388   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7389     unsigned OpReg = Inst.getOperand(i).getReg();
7390     if (OpReg == Reg)
7391       return true;
7392   }
7393   return false;
7394 }
7395 
7396 // Return true if instruction has the interesting property of being
7397 // allowed in IT blocks, but not being predicable.
7398 static bool instIsBreakpoint(const MCInst &Inst) {
7399     return Inst.getOpcode() == ARM::tBKPT ||
7400            Inst.getOpcode() == ARM::BKPT ||
7401            Inst.getOpcode() == ARM::tHLT ||
7402            Inst.getOpcode() == ARM::HLT;
7403 }
7404 
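// Check LDM/POP-style register lists: SP is only allowed in the list when
// 'IsARPop' is set (an A/R-profile POP), and PC and LR may not both appear in
// the list at the same time.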
7405 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7406                                        const OperandVector &Operands,
7407                                        unsigned ListNo, bool IsARPop) {
7408   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7409   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7410 
7411   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7412   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
7413   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7414 
7415   if (!IsARPop && ListContainsSP)
7416     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7417                  "SP may not be in the register list");
7418   else if (ListContainsPC && ListContainsLR)
7419     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7420                  "PC and LR may not be in the register list simultaneously");
7421   return false;
7422 }
7423 
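// Check STM/PUSH-style register lists: neither SP nor PC may appear in the
// register list.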
7424 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7425                                        const OperandVector &Operands,
7426                                        unsigned ListNo) {
7427   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7428   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7429 
7430   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7431   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7432 
7433   if (ListContainsSP && ListContainsPC)
7434     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7435                  "SP and PC may not be in the register list");
7436   else if (ListContainsSP)
7437     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7438                  "SP may not be in the register list");
7439   else if (ListContainsPC)
7440     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7441                  "PC may not be in the register list");
7442   return false;
7443 }
7444 
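// Check the Rt/Rt2/Rn constraints shared by the LDRD/STRD family: in ARM mode
// Rt must be even-numbered, must not be R14, and Rt2 must be Rt+1; in Thumb
// mode a load's two destination registers must differ; and with writeback the
// base register must not overlap Rt or Rt2.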
7445 bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
7446                                     const OperandVector &Operands,
7447                                     bool Load, bool ARMMode, bool Writeback) {
7448   unsigned RtIndex = Load || !Writeback ? 0 : 1;
7449   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7450   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7451 
7452   if (ARMMode) {
7453     // Rt can't be R14.
7454     if (Rt == 14)
7455       return Error(Operands[3]->getStartLoc(),
7456                   "Rt can't be R14");
7457 
7458     // Rt must be even-numbered.
7459     if ((Rt & 1) == 1)
7460       return Error(Operands[3]->getStartLoc(),
7461                    "Rt must be even-numbered");
7462 
7463     // Rt2 must be Rt + 1.
7464     if (Rt2 != Rt + 1) {
7465       if (Load)
7466         return Error(Operands[3]->getStartLoc(),
7467                      "destination operands must be sequential");
7468       else
7469         return Error(Operands[3]->getStartLoc(),
7470                      "source operands must be sequential");
7471     }
7472 
7473     // FIXME: Diagnose m == 15
7474     // FIXME: Diagnose ldrd with m == t || m == t2.
7475   }
7476 
7477   if (!ARMMode && Load) {
7478     if (Rt2 == Rt)
7479       return Error(Operands[3]->getStartLoc(),
7480                    "destination operands can't be identical");
7481   }
7482 
7483   if (Writeback) {
7484     unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7485 
7486     if (Rn == Rt || Rn == Rt2) {
7487       if (Load)
7488         return Error(Operands[3]->getStartLoc(),
7489                      "base register needs to be different from destination "
7490                      "registers");
7491       else
7492         return Error(Operands[3]->getStartLoc(),
7493                      "source register and base register can't be identical");
7494     }
7495 
7496     // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7497     // (Except the immediate form of ldrd?)
7498   }
7499 
7500   return false;
7501 }
7502 
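// Return the index of the first vpred_* operand in the given instruction
// description, or -1 if it does not have one.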
7503 static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
7504   for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7505     if (ARM::isVpred(MCID.operands()[i].OperandType))
7506       return i;
7507   }
7508   return -1;
7509 }
7510 
7511 static bool isVectorPredicable(const MCInstrDesc &MCID) {
7512   return findFirstVectorPredOperandIdx(MCID) != -1;
7513 }
7514 
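// Return true if the operand is an immediate whose value is a symbolic
// (non-constant) expression.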
7515 static bool isARMMCExpr(MCParsedAsmOperand &MCOp) {
7516   ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
7517   if (!Op.isImm())
7518     return false;
7519   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7520   if (CE)
7521     return false;
7522   const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7523   if (!E)
7524     return false;
7525   return true;
7526 }
7527 
7528 // FIXME: We would really like to be able to tablegen'erate this.
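// Check context-sensitive operand constraints that the table-generated
// matcher cannot express; return true (after emitting a diagnostic) if the
// instruction is rejected.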
7529 bool ARMAsmParser::validateInstruction(MCInst &Inst,
7530                                        const OperandVector &Operands) {
7531   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7532   SMLoc Loc = Operands[0]->getStartLoc();
7533 
7534   // Check the IT block state first.
7535   // NOTE: BKPT and HLT instructions have the interesting property of being
7536   // allowed in IT blocks, but not being predicable. They just always execute.
7537   if (inITBlock() && !instIsBreakpoint(Inst)) {
7538     // The instruction must be predicable.
7539     if (!MCID.isPredicable())
7540       return Error(Loc, "instructions in IT block must be predicable");
7541     ARMCC::CondCodes Cond = ARMCC::CondCodes(
7542         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
7543     if (Cond != currentITCond()) {
7544       // Find the condition code Operand to get its SMLoc information.
7545       SMLoc CondLoc;
7546       for (unsigned I = 1; I < Operands.size(); ++I)
7547         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7548           CondLoc = Operands[I]->getStartLoc();
7549       return Error(CondLoc, "incorrect condition in IT block; got '" +
7550                                 StringRef(ARMCondCodeToString(Cond)) +
7551                                 "', but expected '" +
7552                                 ARMCondCodeToString(currentITCond()) + "'");
7553     }
7554   // Check for non-'al' condition codes outside of the IT block.
7555   } else if (isThumbTwo() && MCID.isPredicable() &&
7556              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7557              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7558              Inst.getOpcode() != ARM::t2Bcc &&
7559              Inst.getOpcode() != ARM::t2BFic) {
7560     return Error(Loc, "predicated instructions must be in IT block");
7561   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7562              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7563                  ARMCC::AL) {
7564     return Warning(Loc, "predicated instructions should be in IT block");
7565   } else if (!MCID.isPredicable()) {
7566     // Check the instruction doesn't have a predicate operand anyway
7567     // that it's not allowed to use. Sometimes this happens in order
7568     // to keep instructions the same shape even though one cannot
7569     // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7570     for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7571       if (MCID.operands()[i].isPredicate()) {
7572         if (Inst.getOperand(i).getImm() != ARMCC::AL)
7573           return Error(Loc, "instruction is not predicable");
7574         break;
7575       }
7576     }
7577   }
7578 
7579   // PC-setting instructions in an IT block, but not the last instruction of
7580   // the block, are UNPREDICTABLE.
7581   if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
    return Error(Loc, "instruction must be outside of IT block or the last "
                      "instruction in an IT block");
7583   }
7584 
7585   if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7586     unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7587     if (!isVectorPredicable(MCID))
7588       return Error(Loc, "instruction in VPT block must be predicable");
    unsigned Pred =
        Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7590     unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7591     if (Pred != VPTPred) {
7592       SMLoc PredLoc;
7593       for (unsigned I = 1; I < Operands.size(); ++I)
7594         if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7595           PredLoc = Operands[I]->getStartLoc();
7596       return Error(PredLoc, "incorrect predication in VPT block; got '" +
7597                    StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
7598                    "', but expected '" +
7599                    ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7600     }
7601   }
7602   else if (isVectorPredicable(MCID) &&
7603            Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7604            ARMVCC::None)
7605     return Error(Loc, "VPT predicated instructions must be in VPT block");
7606 
7607   const unsigned Opcode = Inst.getOpcode();
7608   switch (Opcode) {
7609   case ARM::t2IT: {
7610     // Encoding is unpredictable if it ever results in a notional 'NV'
7611     // predicate. Since we don't parse 'NV' directly this means an 'AL'
7612     // predicate with an "else" mask bit.
7613     unsigned Cond = Inst.getOperand(0).getImm();
7614     unsigned Mask = Inst.getOperand(1).getImm();
7615 
7616     // Conditions only allowing a 't' are those with no set bit except
7617     // the lowest-order one that indicates the end of the sequence. In
7618     // other words, powers of 2.
7619     if (Cond == ARMCC::AL && llvm::popcount(Mask) != 1)
7620       return Error(Loc, "unpredictable IT predicate sequence");
7621     break;
7622   }
7623   case ARM::LDRD:
7624     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7625                          /*Writeback*/false))
7626       return true;
7627     break;
7628   case ARM::LDRD_PRE:
7629   case ARM::LDRD_POST:
7630     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7631                          /*Writeback*/true))
7632       return true;
7633     break;
7634   case ARM::t2LDRDi8:
7635     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7636                          /*Writeback*/false))
7637       return true;
7638     break;
7639   case ARM::t2LDRD_PRE:
7640   case ARM::t2LDRD_POST:
7641     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7642                          /*Writeback*/true))
7643       return true;
7644     break;
7645   case ARM::t2BXJ: {
7646     const unsigned RmReg = Inst.getOperand(0).getReg();
7647     // Rm = SP is no longer unpredictable in v8-A
7648     if (RmReg == ARM::SP && !hasV8Ops())
7649       return Error(Operands[2]->getStartLoc(),
7650                    "r13 (SP) is an unpredictable operand to BXJ");
7651     return false;
7652   }
7653   case ARM::STRD:
7654     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7655                          /*Writeback*/false))
7656       return true;
7657     break;
7658   case ARM::STRD_PRE:
7659   case ARM::STRD_POST:
7660     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7661                          /*Writeback*/true))
7662       return true;
7663     break;
7664   case ARM::t2STRD_PRE:
7665   case ARM::t2STRD_POST:
7666     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
7667                          /*Writeback*/true))
7668       return true;
7669     break;
7670   case ARM::STR_PRE_IMM:
7671   case ARM::STR_PRE_REG:
7672   case ARM::t2STR_PRE:
7673   case ARM::STR_POST_IMM:
7674   case ARM::STR_POST_REG:
7675   case ARM::t2STR_POST:
7676   case ARM::STRH_PRE:
7677   case ARM::t2STRH_PRE:
7678   case ARM::STRH_POST:
7679   case ARM::t2STRH_POST:
7680   case ARM::STRB_PRE_IMM:
7681   case ARM::STRB_PRE_REG:
7682   case ARM::t2STRB_PRE:
7683   case ARM::STRB_POST_IMM:
7684   case ARM::STRB_POST_REG:
7685   case ARM::t2STRB_POST: {
7686     // Rt must be different from Rn.
7687     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7688     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7689 
7690     if (Rt == Rn)
7691       return Error(Operands[3]->getStartLoc(),
7692                    "source register and base register can't be identical");
7693     return false;
7694   }
7695   case ARM::t2LDR_PRE_imm:
7696   case ARM::t2LDR_POST_imm:
7697   case ARM::t2STR_PRE_imm:
7698   case ARM::t2STR_POST_imm: {
7699     // Rt must be different from Rn.
7700     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7701     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7702 
7703     if (Rt == Rn)
7704       return Error(Operands[3]->getStartLoc(),
7705                    "destination register and base register can't be identical");
7706     if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7707         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7708       int Imm = Inst.getOperand(2).getImm();
7709       if (Imm > 255 || Imm < -255)
7710         return Error(Operands[5]->getStartLoc(),
7711                      "operand must be in range [-255, 255]");
7712     }
7713     if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7714         Inst.getOpcode() == ARM::t2STR_POST_imm) {
7715       if (Inst.getOperand(0).getReg() == ARM::PC) {
7716         return Error(Operands[3]->getStartLoc(),
7717                      "operand must be a register in range [r0, r14]");
7718       }
7719     }
7720     return false;
7721   }
7722 
7723   case ARM::t2LDRB_OFFSET_imm:
7724   case ARM::t2LDRB_PRE_imm:
7725   case ARM::t2LDRB_POST_imm:
7726   case ARM::t2STRB_OFFSET_imm:
7727   case ARM::t2STRB_PRE_imm:
7728   case ARM::t2STRB_POST_imm: {
7729     if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7730         Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7731         Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7732         Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7733       int Imm = Inst.getOperand(2).getImm();
7734       if (Imm > 255 || Imm < -255)
7735         return Error(Operands[5]->getStartLoc(),
7736                      "operand must be in range [-255, 255]");
7737     } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7738                Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7739       int Imm = Inst.getOperand(2).getImm();
7740       if (Imm > 0 || Imm < -255)
7741         return Error(Operands[5]->getStartLoc(),
7742                      "operand must be in range [0, 255] with a negative sign");
7743     }
7744     if (Inst.getOperand(0).getReg() == ARM::PC) {
7745       return Error(Operands[3]->getStartLoc(),
7746                    "if operand is PC, should call the LDRB (literal)");
7747     }
7748     return false;
7749   }
7750 
7751   case ARM::t2LDRH_OFFSET_imm:
7752   case ARM::t2LDRH_PRE_imm:
7753   case ARM::t2LDRH_POST_imm:
7754   case ARM::t2STRH_OFFSET_imm:
7755   case ARM::t2STRH_PRE_imm:
7756   case ARM::t2STRH_POST_imm: {
7757     if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7758         Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7759         Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7760         Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7761       int Imm = Inst.getOperand(2).getImm();
7762       if (Imm > 255 || Imm < -255)
7763         return Error(Operands[5]->getStartLoc(),
7764                      "operand must be in range [-255, 255]");
7765     } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7766                Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7767       int Imm = Inst.getOperand(2).getImm();
7768       if (Imm > 0 || Imm < -255)
7769         return Error(Operands[5]->getStartLoc(),
7770                      "operand must be in range [0, 255] with a negative sign");
7771     }
7772     if (Inst.getOperand(0).getReg() == ARM::PC) {
7773       return Error(Operands[3]->getStartLoc(),
7774                    "if operand is PC, should call the LDRH (literal)");
7775     }
7776     return false;
7777   }
7778 
7779   case ARM::t2LDRSB_OFFSET_imm:
7780   case ARM::t2LDRSB_PRE_imm:
7781   case ARM::t2LDRSB_POST_imm: {
7782     if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7783         Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7784       int Imm = Inst.getOperand(2).getImm();
7785       if (Imm > 255 || Imm < -255)
7786         return Error(Operands[5]->getStartLoc(),
7787                      "operand must be in range [-255, 255]");
7788     } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7789       int Imm = Inst.getOperand(2).getImm();
7790       if (Imm > 0 || Imm < -255)
7791         return Error(Operands[5]->getStartLoc(),
7792                      "operand must be in range [0, 255] with a negative sign");
7793     }
7794     if (Inst.getOperand(0).getReg() == ARM::PC) {
      return Error(Operands[3]->getStartLoc(),
                   "if operand is PC, should call the LDRSB (literal)");
7797     }
7798     return false;
7799   }
7800 
7801   case ARM::t2LDRSH_OFFSET_imm:
7802   case ARM::t2LDRSH_PRE_imm:
7803   case ARM::t2LDRSH_POST_imm: {
7804     if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7805         Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7806       int Imm = Inst.getOperand(2).getImm();
7807       if (Imm > 255 || Imm < -255)
7808         return Error(Operands[5]->getStartLoc(),
7809                      "operand must be in range [-255, 255]");
7810     } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7811       int Imm = Inst.getOperand(2).getImm();
7812       if (Imm > 0 || Imm < -255)
7813         return Error(Operands[5]->getStartLoc(),
7814                      "operand must be in range [0, 255] with a negative sign");
7815     }
7816     if (Inst.getOperand(0).getReg() == ARM::PC) {
      return Error(Operands[3]->getStartLoc(),
                   "if operand is PC, should call the LDRSH (literal)");
7819     }
7820     return false;
7821   }
7822 
7823   case ARM::LDR_PRE_IMM:
7824   case ARM::LDR_PRE_REG:
7825   case ARM::t2LDR_PRE:
7826   case ARM::LDR_POST_IMM:
7827   case ARM::LDR_POST_REG:
7828   case ARM::t2LDR_POST:
7829   case ARM::LDRH_PRE:
7830   case ARM::t2LDRH_PRE:
7831   case ARM::LDRH_POST:
7832   case ARM::t2LDRH_POST:
7833   case ARM::LDRSH_PRE:
7834   case ARM::t2LDRSH_PRE:
7835   case ARM::LDRSH_POST:
7836   case ARM::t2LDRSH_POST:
7837   case ARM::LDRB_PRE_IMM:
7838   case ARM::LDRB_PRE_REG:
7839   case ARM::t2LDRB_PRE:
7840   case ARM::LDRB_POST_IMM:
7841   case ARM::LDRB_POST_REG:
7842   case ARM::t2LDRB_POST:
7843   case ARM::LDRSB_PRE:
7844   case ARM::t2LDRSB_PRE:
7845   case ARM::LDRSB_POST:
7846   case ARM::t2LDRSB_POST: {
7847     // Rt must be different from Rn.
7848     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7849     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7850 
7851     if (Rt == Rn)
7852       return Error(Operands[3]->getStartLoc(),
7853                    "destination register and base register can't be identical");
7854     return false;
7855   }
7856 
7857   case ARM::MVE_VLDRBU8_rq:
7858   case ARM::MVE_VLDRBU16_rq:
7859   case ARM::MVE_VLDRBS16_rq:
7860   case ARM::MVE_VLDRBU32_rq:
7861   case ARM::MVE_VLDRBS32_rq:
7862   case ARM::MVE_VLDRHU16_rq:
7863   case ARM::MVE_VLDRHU16_rq_u:
7864   case ARM::MVE_VLDRHU32_rq:
7865   case ARM::MVE_VLDRHU32_rq_u:
7866   case ARM::MVE_VLDRHS32_rq:
7867   case ARM::MVE_VLDRHS32_rq_u:
7868   case ARM::MVE_VLDRWU32_rq:
7869   case ARM::MVE_VLDRWU32_rq_u:
7870   case ARM::MVE_VLDRDU64_rq:
7871   case ARM::MVE_VLDRDU64_rq_u:
7872   case ARM::MVE_VLDRWU32_qi:
7873   case ARM::MVE_VLDRWU32_qi_pre:
7874   case ARM::MVE_VLDRDU64_qi:
7875   case ARM::MVE_VLDRDU64_qi_pre: {
7876     // Qd must be different from Qm.
7877     unsigned QdIdx = 0, QmIdx = 2;
7878     bool QmIsPointer = false;
7879     switch (Opcode) {
7880     case ARM::MVE_VLDRWU32_qi:
7881     case ARM::MVE_VLDRDU64_qi:
7882       QmIdx = 1;
7883       QmIsPointer = true;
7884       break;
7885     case ARM::MVE_VLDRWU32_qi_pre:
7886     case ARM::MVE_VLDRDU64_qi_pre:
7887       QdIdx = 1;
7888       QmIsPointer = true;
7889       break;
7890     }
7891 
7892     const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
7893     const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
7894 
7895     if (Qd == Qm) {
7896       return Error(Operands[3]->getStartLoc(),
7897                    Twine("destination vector register and vector ") +
7898                    (QmIsPointer ? "pointer" : "offset") +
7899                    " register can't be identical");
7900     }
7901     return false;
7902   }
7903 
7904   case ARM::SBFX:
7905   case ARM::t2SBFX:
7906   case ARM::UBFX:
7907   case ARM::t2UBFX: {
7908     // Width must be in range [1, 32-lsb].
7909     unsigned LSB = Inst.getOperand(2).getImm();
7910     unsigned Widthm1 = Inst.getOperand(3).getImm();
7911     if (Widthm1 >= 32 - LSB)
7912       return Error(Operands[5]->getStartLoc(),
7913                    "bitfield width must be in range [1,32-lsb]");
7914     return false;
7915   }
7916   // Notionally handles ARM::tLDMIA_UPD too.
7917   case ARM::tLDMIA: {
7918     // If we're parsing Thumb2, the .w variant is available and handles
7919     // most cases that are normally illegal for a Thumb1 LDM instruction.
7920     // We'll make the transformation in processInstruction() if necessary.
7921     //
7922     // Thumb LDM instructions are writeback iff the base register is not
7923     // in the register list.
7924     unsigned Rn = Inst.getOperand(0).getReg();
7925     bool HasWritebackToken =
7926         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7927          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
7928     bool ListContainsBase;
7929     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
7930       return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
7931                    "registers must be in range r0-r7");
7932     // If we should have writeback, then there should be a '!' token.
7933     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
7934       return Error(Operands[2]->getStartLoc(),
7935                    "writeback operator '!' expected");
7936     // If we should not have writeback, there must not be a '!'. This is
7937     // true even for the 32-bit wide encodings.
7938     if (ListContainsBase && HasWritebackToken)
7939       return Error(Operands[3]->getStartLoc(),
7940                    "writeback operator '!' not allowed when base register "
7941                    "in register list");
7942 
7943     if (validatetLDMRegList(Inst, Operands, 3))
7944       return true;
7945     break;
7946   }
7947   case ARM::LDMIA_UPD:
7948   case ARM::LDMDB_UPD:
7949   case ARM::LDMIB_UPD:
7950   case ARM::LDMDA_UPD:
7951     // ARM variants loading and updating the same register are only officially
7952     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
7953     if (!hasV7Ops())
7954       break;
7955     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7956       return Error(Operands.back()->getStartLoc(),
7957                    "writeback register not allowed in register list");
7958     break;
7959   case ARM::t2LDMIA:
7960   case ARM::t2LDMDB:
7961     if (validatetLDMRegList(Inst, Operands, 3))
7962       return true;
7963     break;
7964   case ARM::t2STMIA:
7965   case ARM::t2STMDB:
7966     if (validatetSTMRegList(Inst, Operands, 3))
7967       return true;
7968     break;
7969   case ARM::t2LDMIA_UPD:
7970   case ARM::t2LDMDB_UPD:
7971   case ARM::t2STMIA_UPD:
7972   case ARM::t2STMDB_UPD:
7973     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7974       return Error(Operands.back()->getStartLoc(),
7975                    "writeback register not allowed in register list");
7976 
7977     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
7978       if (validatetLDMRegList(Inst, Operands, 3))
7979         return true;
7980     } else {
7981       if (validatetSTMRegList(Inst, Operands, 3))
7982         return true;
7983     }
7984     break;
7985 
7986   case ARM::sysLDMIA_UPD:
7987   case ARM::sysLDMDA_UPD:
7988   case ARM::sysLDMDB_UPD:
7989   case ARM::sysLDMIB_UPD:
7990     if (!listContainsReg(Inst, 3, ARM::PC))
7991       return Error(Operands[4]->getStartLoc(),
7992                    "writeback register only allowed on system LDM "
7993                    "if PC in register-list");
7994     break;
7995   case ARM::sysSTMIA_UPD:
7996   case ARM::sysSTMDA_UPD:
7997   case ARM::sysSTMDB_UPD:
7998   case ARM::sysSTMIB_UPD:
7999     return Error(Operands[2]->getStartLoc(),
8000                  "system STM cannot have writeback register");
8001   case ARM::tMUL:
8002     // The second source operand must be the same register as the destination
8003     // operand.
8004     //
8005     // In this case, we must directly check the parsed operands because the
8006     // cvtThumbMultiply() function is written in such a way that it guarantees
8007     // this first statement is always true for the new Inst.  Essentially, the
8008     // destination is unconditionally copied into the second source operand
8009     // without checking to see if it matches what we actually parsed.
8010     if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
8011                                  ((ARMOperand &)*Operands[5]).getReg()) &&
8012         (((ARMOperand &)*Operands[3]).getReg() !=
8013          ((ARMOperand &)*Operands[4]).getReg())) {
8014       return Error(Operands[3]->getStartLoc(),
8015                    "destination register must match source register");
8016     }
8017     break;
8018 
8019   // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
8020   // so only issue a diagnostic for thumb1. The instructions will be
8021   // switched to the t2 encodings in processInstruction() if necessary.
8022   case ARM::tPOP: {
8023     bool ListContainsBase;
8024     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
8025         !isThumbTwo())
8026       return Error(Operands[2]->getStartLoc(),
8027                    "registers must be in range r0-r7 or pc");
8028     if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
8029       return true;
8030     break;
8031   }
8032   case ARM::tPUSH: {
8033     bool ListContainsBase;
8034     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
8035         !isThumbTwo())
8036       return Error(Operands[2]->getStartLoc(),
8037                    "registers must be in range r0-r7 or lr");
8038     if (validatetSTMRegList(Inst, Operands, 2))
8039       return true;
8040     break;
8041   }
8042   case ARM::tSTMIA_UPD: {
8043     bool ListContainsBase, InvalidLowList;
8044     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
8045                                           0, ListContainsBase);
8046     if (InvalidLowList && !isThumbTwo())
8047       return Error(Operands[4]->getStartLoc(),
8048                    "registers must be in range r0-r7");
8049 
8050     // This would be converted to a 32-bit stm, but that's not valid if the
8051     // writeback register is in the list.
8052     if (InvalidLowList && ListContainsBase)
8053       return Error(Operands[4]->getStartLoc(),
8054                    "writeback operator '!' not allowed when base register "
8055                    "in register list");
8056 
8057     if (validatetSTMRegList(Inst, Operands, 4))
8058       return true;
8059     break;
8060   }
8061   case ARM::tADDrSP:
8062     // If the non-SP source operand and the destination operand are not the
8063     // same, we need thumb2 (for the wide encoding), or we have an error.
8064     if (!isThumbTwo() &&
8065         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8066       return Error(Operands[4]->getStartLoc(),
8067                    "source register must be the same as destination");
8068     }
8069     break;
8070 
8071   case ARM::t2ADDrr:
8072   case ARM::t2ADDrs:
8073   case ARM::t2SUBrr:
8074   case ARM::t2SUBrs:
8075     if (Inst.getOperand(0).getReg() == ARM::SP &&
8076         Inst.getOperand(1).getReg() != ARM::SP)
8077       return Error(Operands[4]->getStartLoc(),
8078                    "source register must be sp if destination is sp");
8079     break;
8080 
8081   // Final range checking for Thumb unconditional branch instructions.
8082   case ARM::tB:
8083     if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
8084       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8085     break;
8086   case ARM::t2B: {
    int Op = (Operands[2]->isImm()) ? 2 : 3;
    ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[Op]);
    // Delay the checks of symbolic expressions until they are resolved.
    if (!isa<MCBinaryExpr>(Operand.getImm()) &&
        !Operand.isSignedOffset<24, 1>())
      return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8093     break;
8094   }
8095   // Final range checking for Thumb conditional branch instructions.
8096   case ARM::tBcc:
8097     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
8098       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8099     break;
8100   case ARM::t2Bcc: {
8101     int Op = (Operands[2]->isImm()) ? 2 : 3;
8102     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8103       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8104     break;
8105   }
8106   case ARM::tCBZ:
8107   case ARM::tCBNZ: {
8108     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
8109       return Error(Operands[2]->getStartLoc(), "branch target out of range");
8110     break;
8111   }
8112   case ARM::MOVi16:
8113   case ARM::MOVTi16:
8114   case ARM::t2MOVi16:
8115   case ARM::t2MOVTi16:
8116     {
8117     // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
8118     // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16: as part of the expression.  We don't
8120     // want the behavior of silently truncating, which can be unexpected and
8121     // lead to bugs that are difficult to find since this is an easy mistake
8122     // to make.
8123     int i = (Operands[3]->isImm()) ? 3 : 4;
8124     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8125     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8126     if (CE) break;
8127     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
8128     if (!E) break;
8129     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8130     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
8131                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
8132       return Error(
8133           Op.getStartLoc(),
8134           "immediate expression for mov requires :lower16: or :upper16");
8135     break;
8136   }
8137   case ARM::tADDi8: {
8138     MCParsedAsmOperand &Op = *Operands[4];
8139     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8140       return Error(Op.getStartLoc(),
8141                    "Immediate expression for Thumb adds requires :lower0_7:,"
8142                    " :lower8_15:, :upper0_7: or :upper8_15:");
8143     break;
8144   }
8145   case ARM::tMOVi8: {
8146     MCParsedAsmOperand &Op = *Operands[2];
8147     if (isARMMCExpr(Op) && !isThumbI8Relocation(Op))
8148       return Error(Op.getStartLoc(),
8149                    "Immediate expression for Thumb movs requires :lower0_7:,"
8150                    " :lower8_15:, :upper0_7: or :upper8_15:");
8151     break;
8152   }
8153   case ARM::HINT:
8154   case ARM::t2HINT: {
8155     unsigned Imm8 = Inst.getOperand(0).getImm();
8156     unsigned Pred = Inst.getOperand(1).getImm();
8157     // ESB is not predicable (pred must be AL). Without the RAS extension, this
8158     // behaves as any other unallocated hint.
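    // For example, a predicated "esbne" (e.g. inside an IT block) is
    // diagnosed here when the RAS extension is present.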
8159     if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8160       return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
8161                                                "predicable, but condition "
8162                                                "code specified");
8163     if (Imm8 == 0x14 && Pred != ARMCC::AL)
8164       return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
8165                                                "predicable, but condition "
8166                                                "code specified");
8167     break;
8168   }
8169   case ARM::t2BFi:
8170   case ARM::t2BFr:
8171   case ARM::t2BFLi:
8172   case ARM::t2BFLr: {
8173     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
8174         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8175       return Error(Operands[2]->getStartLoc(),
8176                    "branch location out of range or not a multiple of 2");
8177 
8178     if (Opcode == ARM::t2BFi) {
8179       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
8180         return Error(Operands[3]->getStartLoc(),
8181                      "branch target out of range or not a multiple of 2");
8182     } else if (Opcode == ARM::t2BFLi) {
8183       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
8184         return Error(Operands[3]->getStartLoc(),
8185                      "branch target out of range or not a multiple of 2");
8186     }
8187     break;
8188   }
8189   case ARM::t2BFic: {
8190     if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
8191         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8192       return Error(Operands[1]->getStartLoc(),
8193                    "branch location out of range or not a multiple of 2");
8194 
8195     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
8196       return Error(Operands[2]->getStartLoc(),
8197                    "branch target out of range or not a multiple of 2");
8198 
8199     assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8200            "branch location and else branch target should either both be "
8201            "immediates or both labels");
8202 
8203     if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8204       int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8205       if (Diff != 4 && Diff != 2)
8206         return Error(
8207             Operands[3]->getStartLoc(),
8208             "else branch target must be 2 or 4 greater than the branch location");
8209     }
8210     break;
8211   }
8212   case ARM::t2CLRM: {
8213     for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8214       if (Inst.getOperand(i).isReg() &&
8215           !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8216               Inst.getOperand(i).getReg())) {
8217         return Error(Operands[2]->getStartLoc(),
8218                      "invalid register in register list. Valid registers are "
8219                      "r0-r12, lr/r14 and APSR.");
8220       }
8221     }
8222     break;
8223   }
8224   case ARM::DSB:
8225   case ARM::t2DSB: {
8227     if (Inst.getNumOperands() < 2)
8228       break;
8229 
8230     unsigned Option = Inst.getOperand(0).getImm();
8231     unsigned Pred = Inst.getOperand(1).getImm();
8232 
8233     // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8234     if (Option == 0 && Pred != ARMCC::AL)
8235       return Error(Operands[1]->getStartLoc(),
8236                    "instruction 'ssbb' is not predicable, but condition code "
8237                    "specified");
8238     if (Option == 4 && Pred != ARMCC::AL)
8239       return Error(Operands[1]->getStartLoc(),
8240                    "instruction 'pssbb' is not predicable, but condition code "
8241                    "specified");
8242     break;
8243   }
8244   case ARM::VMOVRRS: {
8245     // Source registers must be sequential.
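    // e.g. "vmov r0, r1, s2, s3" is accepted, while "vmov r0, r1, s2, s4"
    // is rejected below (illustrative example).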
8246     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8247     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8248     if (Sm1 != Sm + 1)
8249       return Error(Operands[5]->getStartLoc(),
8250                    "source operands must be sequential");
8251     break;
8252   }
8253   case ARM::VMOVSRR: {
8254     // Destination registers must be sequential.
8255     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8256     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8257     if (Sm1 != Sm + 1)
8258       return Error(Operands[3]->getStartLoc(),
8259                    "destination operands must be sequential");
8260     break;
8261   }
8262   case ARM::VLDMDIA:
8263   case ARM::VSTMDIA: {
8264     ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
8265     auto &RegList = Op.getRegList();
8266     if (RegList.size() < 1 || RegList.size() > 16)
8267       return Error(Operands[3]->getStartLoc(),
8268                    "list of registers must be at least 1 and at most 16");
8269     break;
8270   }
8271   case ARM::MVE_VQDMULLs32bh:
8272   case ARM::MVE_VQDMULLs32th:
8273   case ARM::MVE_VCMULf32:
8274   case ARM::MVE_VMULLBs32:
8275   case ARM::MVE_VMULLTs32:
8276   case ARM::MVE_VMULLBu32:
8277   case ARM::MVE_VMULLTu32: {
    if (Operands[3]->getReg() == Operands[4]->getReg()) {
      return Error(Operands[3]->getStartLoc(),
                   "Qd register and Qn register can't be identical");
    }
    if (Operands[3]->getReg() == Operands[5]->getReg()) {
      return Error(Operands[3]->getStartLoc(),
                   "Qd register and Qm register can't be identical");
    }
8286     break;
8287   }
8288   case ARM::MVE_VREV64_8:
8289   case ARM::MVE_VREV64_16:
8290   case ARM::MVE_VREV64_32:
8291   case ARM::MVE_VQDMULL_qr_s32bh:
8292   case ARM::MVE_VQDMULL_qr_s32th: {
    if (Operands[3]->getReg() == Operands[4]->getReg()) {
      return Error(Operands[3]->getStartLoc(),
                   "Qd register and Qn register can't be identical");
    }
8297     break;
8298   }
8299   case ARM::MVE_VCADDi32:
8300   case ARM::MVE_VCADDf32:
8301   case ARM::MVE_VHCADDs32: {
    if (Operands[3]->getReg() == Operands[5]->getReg()) {
      return Error(Operands[3]->getStartLoc(),
                   "Qd register and Qm register can't be identical");
    }
8306     break;
8307   }
8308   case ARM::MVE_VMOV_rr_q: {
    if (Operands[4]->getReg() != Operands[6]->getReg())
      return Error(Operands[4]->getStartLoc(), "Q-registers must be the same");
    if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
        static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
      return Error(Operands[5]->getStartLoc(),
                   "Q-register indexes must be 2 and 0 or 3 and 1");
8314     break;
8315   }
8316   case ARM::MVE_VMOV_q_rr: {
    if (Operands[2]->getReg() != Operands[4]->getReg())
      return Error(Operands[2]->getStartLoc(), "Q-registers must be the same");
    if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
        static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
      return Error(Operands[3]->getStartLoc(),
                   "Q-register indexes must be 2 and 0 or 3 and 1");
8322     break;
8323   }
8324   case ARM::UMAAL:
8325   case ARM::UMLAL:
8326   case ARM::UMULL:
8327   case ARM::t2UMAAL:
8328   case ARM::t2UMLAL:
8329   case ARM::t2UMULL:
8330   case ARM::SMLAL:
8331   case ARM::SMLALBB:
8332   case ARM::SMLALBT:
8333   case ARM::SMLALD:
8334   case ARM::SMLALDX:
8335   case ARM::SMLALTB:
8336   case ARM::SMLALTT:
8337   case ARM::SMLSLD:
8338   case ARM::SMLSLDX:
8339   case ARM::SMULL:
8340   case ARM::t2SMLAL:
8341   case ARM::t2SMLALBB:
8342   case ARM::t2SMLALBT:
8343   case ARM::t2SMLALD:
8344   case ARM::t2SMLALDX:
8345   case ARM::t2SMLALTB:
8346   case ARM::t2SMLALTT:
8347   case ARM::t2SMLSLD:
8348   case ARM::t2SMLSLDX:
8349   case ARM::t2SMULL: {
8350     unsigned RdHi = Inst.getOperand(0).getReg();
8351     unsigned RdLo = Inst.getOperand(1).getReg();
    if (RdHi == RdLo)
      return Error(
          Loc, "unpredictable instruction, RdHi and RdLo must be different");
8356     break;
8357   }
8358 
8359   case ARM::CDE_CX1:
8360   case ARM::CDE_CX1A:
8361   case ARM::CDE_CX1D:
8362   case ARM::CDE_CX1DA:
8363   case ARM::CDE_CX2:
8364   case ARM::CDE_CX2A:
8365   case ARM::CDE_CX2D:
8366   case ARM::CDE_CX2DA:
8367   case ARM::CDE_CX3:
8368   case ARM::CDE_CX3A:
8369   case ARM::CDE_CX3D:
8370   case ARM::CDE_CX3DA:
8371   case ARM::CDE_VCX1_vec:
8372   case ARM::CDE_VCX1_fpsp:
8373   case ARM::CDE_VCX1_fpdp:
8374   case ARM::CDE_VCX1A_vec:
8375   case ARM::CDE_VCX1A_fpsp:
8376   case ARM::CDE_VCX1A_fpdp:
8377   case ARM::CDE_VCX2_vec:
8378   case ARM::CDE_VCX2_fpsp:
8379   case ARM::CDE_VCX2_fpdp:
8380   case ARM::CDE_VCX2A_vec:
8381   case ARM::CDE_VCX2A_fpsp:
8382   case ARM::CDE_VCX2A_fpdp:
8383   case ARM::CDE_VCX3_vec:
8384   case ARM::CDE_VCX3_fpsp:
8385   case ARM::CDE_VCX3_fpdp:
8386   case ARM::CDE_VCX3A_vec:
8387   case ARM::CDE_VCX3A_fpsp:
8388   case ARM::CDE_VCX3A_fpdp: {
8389     assert(Inst.getOperand(1).isImm() &&
8390            "CDE operand 1 must be a coprocessor ID");
8391     int64_t Coproc = Inst.getOperand(1).getImm();
8392     if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8393       return Error(Operands[1]->getStartLoc(),
8394                    "coprocessor must be configured as CDE");
8395     else if (Coproc >= 8)
8396       return Error(Operands[1]->getStartLoc(),
8397                    "coprocessor must be in the range [p0, p7]");
8398     break;
8399   }
8400 
8401   case ARM::t2CDP:
8402   case ARM::t2CDP2:
8403   case ARM::t2LDC2L_OFFSET:
8404   case ARM::t2LDC2L_OPTION:
8405   case ARM::t2LDC2L_POST:
8406   case ARM::t2LDC2L_PRE:
8407   case ARM::t2LDC2_OFFSET:
8408   case ARM::t2LDC2_OPTION:
8409   case ARM::t2LDC2_POST:
8410   case ARM::t2LDC2_PRE:
8411   case ARM::t2LDCL_OFFSET:
8412   case ARM::t2LDCL_OPTION:
8413   case ARM::t2LDCL_POST:
8414   case ARM::t2LDCL_PRE:
8415   case ARM::t2LDC_OFFSET:
8416   case ARM::t2LDC_OPTION:
8417   case ARM::t2LDC_POST:
8418   case ARM::t2LDC_PRE:
8419   case ARM::t2MCR:
8420   case ARM::t2MCR2:
8421   case ARM::t2MCRR:
8422   case ARM::t2MCRR2:
8423   case ARM::t2MRC:
8424   case ARM::t2MRC2:
8425   case ARM::t2MRRC:
8426   case ARM::t2MRRC2:
8427   case ARM::t2STC2L_OFFSET:
8428   case ARM::t2STC2L_OPTION:
8429   case ARM::t2STC2L_POST:
8430   case ARM::t2STC2L_PRE:
8431   case ARM::t2STC2_OFFSET:
8432   case ARM::t2STC2_OPTION:
8433   case ARM::t2STC2_POST:
8434   case ARM::t2STC2_PRE:
8435   case ARM::t2STCL_OFFSET:
8436   case ARM::t2STCL_OPTION:
8437   case ARM::t2STCL_POST:
8438   case ARM::t2STCL_PRE:
8439   case ARM::t2STC_OFFSET:
8440   case ARM::t2STC_OPTION:
8441   case ARM::t2STC_POST:
8442   case ARM::t2STC_PRE: {
8443     unsigned Opcode = Inst.getOpcode();
    // Inst.getOperand indexes operands in the (outs ...) and (ins ...) dags;
    // CopInd is the index of the coprocessor operand.
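    // For example, for t2MRC the coprocessor number is MCInst operand 1,
    // since operand 0 is the destination register Rt.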
8446     size_t CopInd = 0;
8447     if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8448       CopInd = 2;
8449     else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8450       CopInd = 1;
8451     assert(Inst.getOperand(CopInd).isImm() &&
8452            "Operand must be a coprocessor ID");
8453     int64_t Coproc = Inst.getOperand(CopInd).getImm();
8454     // Operands[2] is the coprocessor operand at syntactic level
8455     if (ARM::isCDECoproc(Coproc, *STI))
8456       return Error(Operands[2]->getStartLoc(),
8457                    "coprocessor must be configured as GCP");
8458     break;
8459   }
8460   }
8461 
8462   return false;
8463 }
8464 
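// Map a VST*_Asm_* pseudo-opcode produced by the asm matcher onto the real
// VST instruction opcode, reporting the register-list spacing through
// Spacing (1 for lists of consecutive D registers, 2 for double-spaced
// lists such as {d0, d2, d4}).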
8465 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch (Opc) {
8467   default: llvm_unreachable("unexpected opcode!");
8468   // VST1LN
8469   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
8470   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8471   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8472   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
8473   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8474   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8475   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
8476   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
8477   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
8478 
8479   // VST2LN
8480   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
8481   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8482   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8483   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8484   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8485 
8486   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
8487   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8488   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8489   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8490   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8491 
8492   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
8493   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
8494   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
8495   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
8496   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
8497 
8498   // VST3LN
8499   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
8500   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8501   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8502   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
8503   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8504   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
8505   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8506   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8507   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
8508   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8509   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
8510   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
8511   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
8512   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
8513   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
8514 
8515   // VST3
8516   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
8517   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8518   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8519   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
8520   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8521   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8522   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
8523   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8524   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8525   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
8526   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8527   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8528   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
8529   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
8530   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
8531   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
8532   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
8533   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
8534 
8535   // VST4LN
8536   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
8537   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8538   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8539   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
8540   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8541   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
8542   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8543   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8544   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
8545   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8546   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
8547   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
8548   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
8549   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
8550   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
8551 
8552   // VST4
8553   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
8554   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8555   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8556   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
8557   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8558   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8559   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
8560   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8561   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8562   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
8563   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8564   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8565   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
8566   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
8567   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
8568   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
8569   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
8570   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
8571   }
8572 }
8573 
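// Same mapping as getRealVSTOpcode, but for the VLD*_Asm_* pseudo-opcodes:
// returns the real VLD opcode and sets Spacing to the register-list stride.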
8574 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
  switch (Opc) {
8576   default: llvm_unreachable("unexpected opcode!");
8577   // VLD1LN
8578   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
8579   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8580   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8581   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
8582   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8583   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8584   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
8585   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
8586   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
8587 
8588   // VLD2LN
8589   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
8590   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8591   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8592   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
8593   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8594   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
8595   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8596   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8597   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
8598   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8599   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
8600   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
8601   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
8602   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
8603   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
8604 
8605   // VLD3DUP
8606   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
8607   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8608   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8609   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
8610   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8611   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8612   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
8613   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8614   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8615   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
8616   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8617   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8618   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
8619   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
8620   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
8621   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
8622   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
8623   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
8624 
8625   // VLD3LN
8626   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
8627   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8628   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8629   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
8630   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8631   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
8632   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8633   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8634   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
8635   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8636   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
8637   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
8638   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
8639   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
8640   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
8641 
8642   // VLD3
8643   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
8644   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8645   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8646   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
8647   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8648   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8649   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
8650   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8651   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8652   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
8653   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8654   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8655   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
8656   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
8657   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
8658   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
8659   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
8660   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
8661 
8662   // VLD4LN
8663   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
8664   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8665   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8666   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8667   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8668   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
8669   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8670   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8671   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8672   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8673   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
8674   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
8675   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
8676   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
8677   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
8678 
8679   // VLD4DUP
8680   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
8681   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8682   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8683   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
8684   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
8685   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8686   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
8687   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8688   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8689   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
8690   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
8691   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8692   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
8693   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
8694   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
8695   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
8696   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
8697   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
8698 
8699   // VLD4
8700   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
8701   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8702   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8703   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
8704   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8705   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8706   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
8707   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8708   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8709   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
8710   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8711   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8712   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
8713   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
8714   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
8715   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
8716   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
8717   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
8718   }
8719 }
8720 
8721 bool ARMAsmParser::processInstruction(MCInst &Inst,
8722                                       const OperandVector &Operands,
8723                                       MCStreamer &Out) {
  // Check if we have the wide qualifier, because if it's present we
  // must avoid selecting a 16-bit Thumb instruction.
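  // Illustrative example (not from the source): "adds.w r0, r0, #1" must use
  // the 32-bit encoding even though a 16-bit "adds" encoding exists.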
8726   bool HasWideQualifier = false;
8727   for (auto &Op : Operands) {
8728     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8729     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8730       HasWideQualifier = true;
8731       break;
8732     }
8733   }
8734 
8735   switch (Inst.getOpcode()) {
8736   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8737   case ARM::LDRT_POST:
8738   case ARM::LDRBT_POST: {
8739     const unsigned Opcode =
8740       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8741                                            : ARM::LDRBT_POST_IMM;
8742     MCInst TmpInst;
8743     TmpInst.setOpcode(Opcode);
8744     TmpInst.addOperand(Inst.getOperand(0));
8745     TmpInst.addOperand(Inst.getOperand(1));
8746     TmpInst.addOperand(Inst.getOperand(1));
8747     TmpInst.addOperand(MCOperand::createReg(0));
8748     TmpInst.addOperand(MCOperand::createImm(0));
8749     TmpInst.addOperand(Inst.getOperand(2));
8750     TmpInst.addOperand(Inst.getOperand(3));
8751     Inst = TmpInst;
8752     return true;
8753   }
  // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for omitted immediate.
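  // e.g. "ldrsht r0, [r1]" is parsed through this alias and emitted as the
  // immediate form, as if a zero offset had been written (illustrative).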
8755   case ARM::LDRSBTii:
8756   case ARM::LDRHTii:
8757   case ARM::LDRSHTii: {
8758     MCInst TmpInst;
8759 
8760     if (Inst.getOpcode() == ARM::LDRSBTii)
8761       TmpInst.setOpcode(ARM::LDRSBTi);
8762     else if (Inst.getOpcode() == ARM::LDRHTii)
8763       TmpInst.setOpcode(ARM::LDRHTi);
8764     else if (Inst.getOpcode() == ARM::LDRSHTii)
8765       TmpInst.setOpcode(ARM::LDRSHTi);
8766     TmpInst.addOperand(Inst.getOperand(0));
8767     TmpInst.addOperand(Inst.getOperand(1));
8768     TmpInst.addOperand(Inst.getOperand(1));
8769     TmpInst.addOperand(MCOperand::createImm(256));
8770     TmpInst.addOperand(Inst.getOperand(2));
8771     Inst = TmpInst;
8772     return true;
8773   }
8774   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8775   case ARM::STRT_POST:
8776   case ARM::STRBT_POST: {
8777     const unsigned Opcode =
8778       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8779                                            : ARM::STRBT_POST_IMM;
8780     MCInst TmpInst;
8781     TmpInst.setOpcode(Opcode);
8782     TmpInst.addOperand(Inst.getOperand(1));
8783     TmpInst.addOperand(Inst.getOperand(0));
8784     TmpInst.addOperand(Inst.getOperand(1));
8785     TmpInst.addOperand(MCOperand::createReg(0));
8786     TmpInst.addOperand(MCOperand::createImm(0));
8787     TmpInst.addOperand(Inst.getOperand(2));
8788     TmpInst.addOperand(Inst.getOperand(3));
8789     Inst = TmpInst;
8790     return true;
8791   }
8792   // Alias for alternate form of 'ADR Rd, #imm' instruction.
8793   case ARM::ADDri: {
8794     if (Inst.getOperand(1).getReg() != ARM::PC ||
8795         Inst.getOperand(5).getReg() != 0 ||
8796         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
8797       return false;
8798     MCInst TmpInst;
8799     TmpInst.setOpcode(ARM::ADR);
8800     TmpInst.addOperand(Inst.getOperand(0));
8801     if (Inst.getOperand(2).isImm()) {
      // The immediate (mod_imm) will be in its encoded form; we must decode it
      // before passing it to the ADR instruction.
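      // Illustrative example (not from the source): Enc = 0x101 encodes
      // imm8 = 1 rotated right by 2, i.e. an effective value of 0x40000000.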
8804       unsigned Enc = Inst.getOperand(2).getImm();
8805       TmpInst.addOperand(MCOperand::createImm(
8806           llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8807     } else {
8808       // Turn PC-relative expression into absolute expression.
      // Reading PC yields the address of the current instruction + 8, and the
      // transform to adr must compensate for that bias.
8811       MCSymbol *Dot = getContext().createTempSymbol();
8812       Out.emitLabel(Dot);
8813       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
8814       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
8815                                                      MCSymbolRefExpr::VK_None,
8816                                                      getContext());
8817       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
8818       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
8819                                                      getContext());
8820       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
8821                                                         getContext());
8822       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
8823     }
8824     TmpInst.addOperand(Inst.getOperand(3));
8825     TmpInst.addOperand(Inst.getOperand(4));
8826     Inst = TmpInst;
8827     return true;
8828   }
8829   // Aliases for imm syntax of LDR instructions.
8830   case ARM::t2LDR_PRE_imm:
8831   case ARM::t2LDR_POST_imm: {
8832     MCInst TmpInst;
8833     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
8834                                                              : ARM::t2LDR_POST);
8835     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8836     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8837     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8838     TmpInst.addOperand(Inst.getOperand(2)); // imm
8839     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8840     Inst = TmpInst;
8841     return true;
8842   }
8843   // Aliases for imm syntax of STR instructions.
8844   case ARM::t2STR_PRE_imm:
8845   case ARM::t2STR_POST_imm: {
8846     MCInst TmpInst;
8847     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
8848                                                              : ARM::t2STR_POST);
8849     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8850     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8851     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8852     TmpInst.addOperand(Inst.getOperand(2)); // imm
8853     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8854     Inst = TmpInst;
8855     return true;
8856   }
8857   // Aliases for imm syntax of LDRB instructions.
8858   case ARM::t2LDRB_OFFSET_imm: {
8859     MCInst TmpInst;
8860     TmpInst.setOpcode(ARM::t2LDRBi8);
8861     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8862     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8863     TmpInst.addOperand(Inst.getOperand(2)); // imm
8864     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8865     Inst = TmpInst;
8866     return true;
8867   }
8868   case ARM::t2LDRB_PRE_imm:
8869   case ARM::t2LDRB_POST_imm: {
8870     MCInst TmpInst;
8871     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
8872                           ? ARM::t2LDRB_PRE
8873                           : ARM::t2LDRB_POST);
8874     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8875     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8876     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8877     TmpInst.addOperand(Inst.getOperand(2)); // imm
8878     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8879     Inst = TmpInst;
8880     return true;
8881   }
8882   // Aliases for imm syntax of STRB instructions.
8883   case ARM::t2STRB_OFFSET_imm: {
8884     MCInst TmpInst;
8885     TmpInst.setOpcode(ARM::t2STRBi8);
8886     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8887     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8888     TmpInst.addOperand(Inst.getOperand(2)); // imm
8889     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8890     Inst = TmpInst;
8891     return true;
8892   }
8893   case ARM::t2STRB_PRE_imm:
8894   case ARM::t2STRB_POST_imm: {
8895     MCInst TmpInst;
8896     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
8897                           ? ARM::t2STRB_PRE
8898                           : ARM::t2STRB_POST);
8899     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8900     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8901     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8902     TmpInst.addOperand(Inst.getOperand(2)); // imm
8903     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8904     Inst = TmpInst;
8905     return true;
8906   }
8907   // Aliases for imm syntax of LDRH instructions.
8908   case ARM::t2LDRH_OFFSET_imm: {
8909     MCInst TmpInst;
8910     TmpInst.setOpcode(ARM::t2LDRHi8);
8911     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8912     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8913     TmpInst.addOperand(Inst.getOperand(2)); // imm
8914     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8915     Inst = TmpInst;
8916     return true;
8917   }
8918   case ARM::t2LDRH_PRE_imm:
8919   case ARM::t2LDRH_POST_imm: {
8920     MCInst TmpInst;
8921     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
8922                           ? ARM::t2LDRH_PRE
8923                           : ARM::t2LDRH_POST);
8924     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8925     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8926     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8927     TmpInst.addOperand(Inst.getOperand(2)); // imm
8928     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8929     Inst = TmpInst;
8930     return true;
8931   }
8932   // Aliases for imm syntax of STRH instructions.
8933   case ARM::t2STRH_OFFSET_imm: {
8934     MCInst TmpInst;
8935     TmpInst.setOpcode(ARM::t2STRHi8);
8936     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8937     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8938     TmpInst.addOperand(Inst.getOperand(2)); // imm
8939     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8940     Inst = TmpInst;
8941     return true;
8942   }
8943   case ARM::t2STRH_PRE_imm:
8944   case ARM::t2STRH_POST_imm: {
8945     MCInst TmpInst;
8946     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
8947                           ? ARM::t2STRH_PRE
8948                           : ARM::t2STRH_POST);
8949     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8950     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8951     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8952     TmpInst.addOperand(Inst.getOperand(2)); // imm
8953     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8954     Inst = TmpInst;
8955     return true;
8956   }
8957   // Aliases for imm syntax of LDRSB instructions.
8958   case ARM::t2LDRSB_OFFSET_imm: {
8959     MCInst TmpInst;
8960     TmpInst.setOpcode(ARM::t2LDRSBi8);
8961     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8962     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8963     TmpInst.addOperand(Inst.getOperand(2)); // imm
8964     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8965     Inst = TmpInst;
8966     return true;
8967   }
8968   case ARM::t2LDRSB_PRE_imm:
8969   case ARM::t2LDRSB_POST_imm: {
8970     MCInst TmpInst;
8971     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
8972                           ? ARM::t2LDRSB_PRE
8973                           : ARM::t2LDRSB_POST);
8974     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8975     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8976     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8977     TmpInst.addOperand(Inst.getOperand(2)); // imm
8978     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8979     Inst = TmpInst;
8980     return true;
8981   }
8982   // Aliases for imm syntax of LDRSH instructions.
8983   case ARM::t2LDRSH_OFFSET_imm: {
8984     MCInst TmpInst;
8985     TmpInst.setOpcode(ARM::t2LDRSHi8);
8986     TmpInst.addOperand(Inst.getOperand(0)); // Rt
8987     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8988     TmpInst.addOperand(Inst.getOperand(2)); // imm
8989     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8990     Inst = TmpInst;
8991     return true;
8992   }
8993   case ARM::t2LDRSH_PRE_imm:
8994   case ARM::t2LDRSH_POST_imm: {
8995     MCInst TmpInst;
8996     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
8997                           ? ARM::t2LDRSH_PRE
8998                           : ARM::t2LDRSH_POST);
8999     TmpInst.addOperand(Inst.getOperand(0)); // Rt
9000     TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9001     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9002     TmpInst.addOperand(Inst.getOperand(2)); // imm
9003     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9004     Inst = TmpInst;
9005     return true;
9006   }
9007   // Aliases for alternate PC+imm syntax of LDR instructions.
9008   case ARM::t2LDRpcrel:
9009     // Select the narrow version if the immediate will fit.
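    // i.e. if the immediate is in (0, 0xff] and no ".w" qualifier was given,
    // use the 16-bit tLDRpci encoding; otherwise fall back to t2LDRpci.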
9010     if (Inst.getOperand(1).getImm() > 0 &&
9011         Inst.getOperand(1).getImm() <= 0xff &&
9012         !HasWideQualifier)
9013       Inst.setOpcode(ARM::tLDRpci);
9014     else
9015       Inst.setOpcode(ARM::t2LDRpci);
9016     return true;
9017   case ARM::t2LDRBpcrel:
9018     Inst.setOpcode(ARM::t2LDRBpci);
9019     return true;
9020   case ARM::t2LDRHpcrel:
9021     Inst.setOpcode(ARM::t2LDRHpci);
9022     return true;
9023   case ARM::t2LDRSBpcrel:
9024     Inst.setOpcode(ARM::t2LDRSBpci);
9025     return true;
9026   case ARM::t2LDRSHpcrel:
9027     Inst.setOpcode(ARM::t2LDRSHpci);
9028     return true;
9029   case ARM::LDRConstPool:
9030   case ARM::tLDRConstPool:
9031   case ARM::t2LDRConstPool: {
    // The pseudo-instruction "ldr rt, =immediate" is converted to a
    // "mov rt, immediate" if the immediate is known and representable;
    // otherwise we create a constant pool entry that we load from.
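    // Illustrative examples (not from the source):
    //   ldr r0, =0x1      -> mov r0, #1 (representable constant)
    //   ldr r0, =ext_sym  -> load from a newly created constant pool entry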
9035     MCInst TmpInst;
9036     if (Inst.getOpcode() == ARM::LDRConstPool)
9037       TmpInst.setOpcode(ARM::LDRi12);
9038     else if (Inst.getOpcode() == ARM::tLDRConstPool)
9039       TmpInst.setOpcode(ARM::tLDRpci);
9040     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
9041       TmpInst.setOpcode(ARM::t2LDRpci);
9042     const ARMOperand &PoolOperand =
9043       (HasWideQualifier ?
9044        static_cast<ARMOperand &>(*Operands[4]) :
9045        static_cast<ARMOperand &>(*Operands[3]));
9046     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
    // If SubExprVal is a constant, we may be able to use a MOV instead.
9048     if (isa<MCConstantExpr>(SubExprVal) &&
9049         Inst.getOperand(0).getReg() != ARM::PC &&
9050         Inst.getOperand(0).getReg() != ARM::SP) {
9051       int64_t Value =
9052         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9053       bool UseMov  = true;
9054       bool MovHasS = true;
9055       if (Inst.getOpcode() == ARM::LDRConstPool) {
9056         // ARM Constant
9057         if (ARM_AM::getSOImmVal(Value) != -1) {
9058           Value = ARM_AM::getSOImmVal(Value);
9059           TmpInst.setOpcode(ARM::MOVi);
9060         }
9061         else if (ARM_AM::getSOImmVal(~Value) != -1) {
9062           Value = ARM_AM::getSOImmVal(~Value);
9063           TmpInst.setOpcode(ARM::MVNi);
9064         }
        else if (hasV6T2Ops() && Value >= 0 && Value < 65536) {
9067           TmpInst.setOpcode(ARM::MOVi16);
9068           MovHasS = false;
9069         }
9070         else
9071           UseMov = false;
9072       }
9073       else {
9074         // Thumb/Thumb2 Constant
9075         if (hasThumb2() &&
9076             ARM_AM::getT2SOImmVal(Value) != -1)
9077           TmpInst.setOpcode(ARM::t2MOVi);
9078         else if (hasThumb2() &&
9079                  ARM_AM::getT2SOImmVal(~Value) != -1) {
9080           TmpInst.setOpcode(ARM::t2MVNi);
9081           Value = ~Value;
9082         }
        else if (hasV8MBaseline() && Value >= 0 && Value < 65536) {
9085           TmpInst.setOpcode(ARM::t2MOVi16);
9086           MovHasS = false;
9087         }
9088         else
9089           UseMov = false;
9090       }
9091       if (UseMov) {
9092         TmpInst.addOperand(Inst.getOperand(0));           // Rt
9093         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
9094         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
9095         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
9096         if (MovHasS)
9097           TmpInst.addOperand(MCOperand::createReg(0));    // S
9098         Inst = TmpInst;
9099         return true;
9100       }
9101     }
    // No opportunity to use MOV/MVN; create a constant pool entry.
9103     const MCExpr *CPLoc =
9104       getTargetStreamer().addConstantPoolEntry(SubExprVal,
9105                                                PoolOperand.getStartLoc());
9106     TmpInst.addOperand(Inst.getOperand(0));           // Rt
9107     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
9108     if (TmpInst.getOpcode() == ARM::LDRi12)
9109       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
9110     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
9111     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
9112     Inst = TmpInst;
9113     return true;
9114   }
9115   // Handle NEON VST complex aliases.
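  // The cases below rewrite the matcher's pseudo-opcodes into the real
  // VSTnLN/VSTn instructions: operands are reordered into the MCInst layout
  // and the register list is expanded using the Spacing value returned by
  // getRealVSTOpcode. For the single-lane, register-writeback forms this
  // turns the parsed (Vd, lane, Rn, align, Rm, pred) operands into
  // (Rn_wb, Rn, align, Rm, Vd .. Vd+spacing, lane, pred).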
9116   case ARM::VST1LNdWB_register_Asm_8:
9117   case ARM::VST1LNdWB_register_Asm_16:
9118   case ARM::VST1LNdWB_register_Asm_32: {
9119     MCInst TmpInst;
9120     // Shuffle the operands around so the lane index operand is in the
9121     // right place.
9122     unsigned Spacing;
9123     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9124     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9125     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9126     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9127     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9128     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9129     TmpInst.addOperand(Inst.getOperand(1)); // lane
9130     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9131     TmpInst.addOperand(Inst.getOperand(6));
9132     Inst = TmpInst;
9133     return true;
9134   }
9135 
9136   case ARM::VST2LNdWB_register_Asm_8:
9137   case ARM::VST2LNdWB_register_Asm_16:
9138   case ARM::VST2LNdWB_register_Asm_32:
9139   case ARM::VST2LNqWB_register_Asm_16:
9140   case ARM::VST2LNqWB_register_Asm_32: {
9141     MCInst TmpInst;
9142     // Shuffle the operands around so the lane index operand is in the
9143     // right place.
9144     unsigned Spacing;
9145     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9146     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9147     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9148     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9149     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9150     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9151     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9152                                             Spacing));
9153     TmpInst.addOperand(Inst.getOperand(1)); // lane
9154     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9155     TmpInst.addOperand(Inst.getOperand(6));
9156     Inst = TmpInst;
9157     return true;
9158   }
9159 
9160   case ARM::VST3LNdWB_register_Asm_8:
9161   case ARM::VST3LNdWB_register_Asm_16:
9162   case ARM::VST3LNdWB_register_Asm_32:
9163   case ARM::VST3LNqWB_register_Asm_16:
9164   case ARM::VST3LNqWB_register_Asm_32: {
9165     MCInst TmpInst;
9166     // Shuffle the operands around so the lane index operand is in the
9167     // right place.
9168     unsigned Spacing;
9169     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9170     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9171     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9172     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9173     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9174     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9175     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9176                                             Spacing));
9177     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9178                                             Spacing * 2));
9179     TmpInst.addOperand(Inst.getOperand(1)); // lane
9180     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9181     TmpInst.addOperand(Inst.getOperand(6));
9182     Inst = TmpInst;
9183     return true;
9184   }
9185 
9186   case ARM::VST4LNdWB_register_Asm_8:
9187   case ARM::VST4LNdWB_register_Asm_16:
9188   case ARM::VST4LNdWB_register_Asm_32:
9189   case ARM::VST4LNqWB_register_Asm_16:
9190   case ARM::VST4LNqWB_register_Asm_32: {
9191     MCInst TmpInst;
9192     // Shuffle the operands around so the lane index operand is in the
9193     // right place.
9194     unsigned Spacing;
9195     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9196     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9197     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9198     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9199     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9200     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9201     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9202                                             Spacing));
9203     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9204                                             Spacing * 2));
9205     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9206                                             Spacing * 3));
9207     TmpInst.addOperand(Inst.getOperand(1)); // lane
9208     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9209     TmpInst.addOperand(Inst.getOperand(6));
9210     Inst = TmpInst;
9211     return true;
9212   }
9213 
9214   case ARM::VST1LNdWB_fixed_Asm_8:
9215   case ARM::VST1LNdWB_fixed_Asm_16:
9216   case ARM::VST1LNdWB_fixed_Asm_32: {
9217     MCInst TmpInst;
9218     // Shuffle the operands around so the lane index operand is in the
9219     // right place.
9220     unsigned Spacing;
9221     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9222     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9223     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9224     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9225     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9226     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9227     TmpInst.addOperand(Inst.getOperand(1)); // lane
9228     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9229     TmpInst.addOperand(Inst.getOperand(5));
9230     Inst = TmpInst;
9231     return true;
9232   }
9233 
9234   case ARM::VST2LNdWB_fixed_Asm_8:
9235   case ARM::VST2LNdWB_fixed_Asm_16:
9236   case ARM::VST2LNdWB_fixed_Asm_32:
9237   case ARM::VST2LNqWB_fixed_Asm_16:
9238   case ARM::VST2LNqWB_fixed_Asm_32: {
9239     MCInst TmpInst;
9240     // Shuffle the operands around so the lane index operand is in the
9241     // right place.
9242     unsigned Spacing;
9243     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9244     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9245     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9246     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9247     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9248     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9249     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9250                                             Spacing));
9251     TmpInst.addOperand(Inst.getOperand(1)); // lane
9252     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9253     TmpInst.addOperand(Inst.getOperand(5));
9254     Inst = TmpInst;
9255     return true;
9256   }
9257 
9258   case ARM::VST3LNdWB_fixed_Asm_8:
9259   case ARM::VST3LNdWB_fixed_Asm_16:
9260   case ARM::VST3LNdWB_fixed_Asm_32:
9261   case ARM::VST3LNqWB_fixed_Asm_16:
9262   case ARM::VST3LNqWB_fixed_Asm_32: {
9263     MCInst TmpInst;
9264     // Shuffle the operands around so the lane index operand is in the
9265     // right place.
9266     unsigned Spacing;
9267     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9268     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9269     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9270     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9271     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9272     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9273     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9274                                             Spacing));
9275     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9276                                             Spacing * 2));
9277     TmpInst.addOperand(Inst.getOperand(1)); // lane
9278     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9279     TmpInst.addOperand(Inst.getOperand(5));
9280     Inst = TmpInst;
9281     return true;
9282   }
9283 
9284   case ARM::VST4LNdWB_fixed_Asm_8:
9285   case ARM::VST4LNdWB_fixed_Asm_16:
9286   case ARM::VST4LNdWB_fixed_Asm_32:
9287   case ARM::VST4LNqWB_fixed_Asm_16:
9288   case ARM::VST4LNqWB_fixed_Asm_32: {
9289     MCInst TmpInst;
9290     // Shuffle the operands around so the lane index operand is in the
9291     // right place.
9292     unsigned Spacing;
9293     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9294     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9295     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9296     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9297     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9298     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9299     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9300                                             Spacing));
9301     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9302                                             Spacing * 2));
9303     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9304                                             Spacing * 3));
9305     TmpInst.addOperand(Inst.getOperand(1)); // lane
9306     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9307     TmpInst.addOperand(Inst.getOperand(5));
9308     Inst = TmpInst;
9309     return true;
9310   }
9311 
9312   case ARM::VST1LNdAsm_8:
9313   case ARM::VST1LNdAsm_16:
9314   case ARM::VST1LNdAsm_32: {
9315     MCInst TmpInst;
9316     // Shuffle the operands around so the lane index operand is in the
9317     // right place.
9318     unsigned Spacing;
9319     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9320     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9321     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9322     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9323     TmpInst.addOperand(Inst.getOperand(1)); // lane
9324     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9325     TmpInst.addOperand(Inst.getOperand(5));
9326     Inst = TmpInst;
9327     return true;
9328   }
9329 
9330   case ARM::VST2LNdAsm_8:
9331   case ARM::VST2LNdAsm_16:
9332   case ARM::VST2LNdAsm_32:
9333   case ARM::VST2LNqAsm_16:
9334   case ARM::VST2LNqAsm_32: {
9335     MCInst TmpInst;
9336     // Shuffle the operands around so the lane index operand is in the
9337     // right place.
9338     unsigned Spacing;
9339     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9340     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9341     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9342     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9343     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9344                                             Spacing));
9345     TmpInst.addOperand(Inst.getOperand(1)); // lane
9346     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9347     TmpInst.addOperand(Inst.getOperand(5));
9348     Inst = TmpInst;
9349     return true;
9350   }
9351 
9352   case ARM::VST3LNdAsm_8:
9353   case ARM::VST3LNdAsm_16:
9354   case ARM::VST3LNdAsm_32:
9355   case ARM::VST3LNqAsm_16:
9356   case ARM::VST3LNqAsm_32: {
9357     MCInst TmpInst;
9358     // Shuffle the operands around so the lane index operand is in the
9359     // right place.
9360     unsigned Spacing;
9361     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9362     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9363     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9364     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9365     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9366                                             Spacing));
9367     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9368                                             Spacing * 2));
9369     TmpInst.addOperand(Inst.getOperand(1)); // lane
9370     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9371     TmpInst.addOperand(Inst.getOperand(5));
9372     Inst = TmpInst;
9373     return true;
9374   }
9375 
9376   case ARM::VST4LNdAsm_8:
9377   case ARM::VST4LNdAsm_16:
9378   case ARM::VST4LNdAsm_32:
9379   case ARM::VST4LNqAsm_16:
9380   case ARM::VST4LNqAsm_32: {
9381     MCInst TmpInst;
9382     // Shuffle the operands around so the lane index operand is in the
9383     // right place.
9384     unsigned Spacing;
9385     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9386     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9387     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9388     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9389     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9390                                             Spacing));
9391     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9392                                             Spacing * 2));
9393     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9394                                             Spacing * 3));
9395     TmpInst.addOperand(Inst.getOperand(1)); // lane
9396     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9397     TmpInst.addOperand(Inst.getOperand(5));
9398     Inst = TmpInst;
9399     return true;
9400   }
9401 
9402   // Handle NEON VLD complex aliases.
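  // Like the VST aliases above, these "...Asm" opcodes are parser-only
  // pseudo-instructions whose operands follow the assembly source order
  // (Vd, lane, Rn, alignment, ...). The cases below rebuild them as the real
  // MC opcodes: the register list is expanded using Spacing (1 register apart
  // for the d-register forms, 2 apart for the even-spaced "q" forms), the
  // lane index is moved to where the real instruction expects it, and the
  // "_fixed" writeback forms pass register 0 for the absent Rm to request
  // post-increment by the transfer size. E.g. "vld1.8 {d0[2]}, [r0]" is
  // parsed as VLD1LNdAsm_8 and rewritten here.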
9403   case ARM::VLD1LNdWB_register_Asm_8:
9404   case ARM::VLD1LNdWB_register_Asm_16:
9405   case ARM::VLD1LNdWB_register_Asm_32: {
9406     MCInst TmpInst;
9407     // Shuffle the operands around so the lane index operand is in the
9408     // right place.
9409     unsigned Spacing;
9410     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9411     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9412     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9413     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9414     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9415     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9416     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9417     TmpInst.addOperand(Inst.getOperand(1)); // lane
9418     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9419     TmpInst.addOperand(Inst.getOperand(6));
9420     Inst = TmpInst;
9421     return true;
9422   }
9423 
9424   case ARM::VLD2LNdWB_register_Asm_8:
9425   case ARM::VLD2LNdWB_register_Asm_16:
9426   case ARM::VLD2LNdWB_register_Asm_32:
9427   case ARM::VLD2LNqWB_register_Asm_16:
9428   case ARM::VLD2LNqWB_register_Asm_32: {
9429     MCInst TmpInst;
9430     // Shuffle the operands around so the lane index operand is in the
9431     // right place.
9432     unsigned Spacing;
9433     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9434     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9435     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9436                                             Spacing));
9437     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9438     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9439     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9440     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9441     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9442     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9443                                             Spacing));
9444     TmpInst.addOperand(Inst.getOperand(1)); // lane
9445     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9446     TmpInst.addOperand(Inst.getOperand(6));
9447     Inst = TmpInst;
9448     return true;
9449   }
9450 
9451   case ARM::VLD3LNdWB_register_Asm_8:
9452   case ARM::VLD3LNdWB_register_Asm_16:
9453   case ARM::VLD3LNdWB_register_Asm_32:
9454   case ARM::VLD3LNqWB_register_Asm_16:
9455   case ARM::VLD3LNqWB_register_Asm_32: {
9456     MCInst TmpInst;
9457     // Shuffle the operands around so the lane index operand is in the
9458     // right place.
9459     unsigned Spacing;
9460     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9461     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9462     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9463                                             Spacing));
9464     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9465                                             Spacing * 2));
9466     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9467     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9468     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9469     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9470     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9471     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9472                                             Spacing));
9473     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9474                                             Spacing * 2));
9475     TmpInst.addOperand(Inst.getOperand(1)); // lane
9476     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9477     TmpInst.addOperand(Inst.getOperand(6));
9478     Inst = TmpInst;
9479     return true;
9480   }
9481 
9482   case ARM::VLD4LNdWB_register_Asm_8:
9483   case ARM::VLD4LNdWB_register_Asm_16:
9484   case ARM::VLD4LNdWB_register_Asm_32:
9485   case ARM::VLD4LNqWB_register_Asm_16:
9486   case ARM::VLD4LNqWB_register_Asm_32: {
9487     MCInst TmpInst;
9488     // Shuffle the operands around so the lane index operand is in the
9489     // right place.
9490     unsigned Spacing;
9491     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9492     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9493     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9494                                             Spacing));
9495     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9496                                             Spacing * 2));
9497     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9498                                             Spacing * 3));
9499     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9500     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9501     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9502     TmpInst.addOperand(Inst.getOperand(4)); // Rm
9503     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9504     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9505                                             Spacing));
9506     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9507                                             Spacing * 2));
9508     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9509                                             Spacing * 3));
9510     TmpInst.addOperand(Inst.getOperand(1)); // lane
9511     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9512     TmpInst.addOperand(Inst.getOperand(6));
9513     Inst = TmpInst;
9514     return true;
9515   }
9516 
9517   case ARM::VLD1LNdWB_fixed_Asm_8:
9518   case ARM::VLD1LNdWB_fixed_Asm_16:
9519   case ARM::VLD1LNdWB_fixed_Asm_32: {
9520     MCInst TmpInst;
9521     // Shuffle the operands around so the lane index operand is in the
9522     // right place.
9523     unsigned Spacing;
9524     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9525     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9526     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9527     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9528     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9529     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9530     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9531     TmpInst.addOperand(Inst.getOperand(1)); // lane
9532     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9533     TmpInst.addOperand(Inst.getOperand(5));
9534     Inst = TmpInst;
9535     return true;
9536   }
9537 
9538   case ARM::VLD2LNdWB_fixed_Asm_8:
9539   case ARM::VLD2LNdWB_fixed_Asm_16:
9540   case ARM::VLD2LNdWB_fixed_Asm_32:
9541   case ARM::VLD2LNqWB_fixed_Asm_16:
9542   case ARM::VLD2LNqWB_fixed_Asm_32: {
9543     MCInst TmpInst;
9544     // Shuffle the operands around so the lane index operand is in the
9545     // right place.
9546     unsigned Spacing;
9547     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9548     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9549     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9550                                             Spacing));
9551     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9552     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9553     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9554     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9555     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9556     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9557                                             Spacing));
9558     TmpInst.addOperand(Inst.getOperand(1)); // lane
9559     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9560     TmpInst.addOperand(Inst.getOperand(5));
9561     Inst = TmpInst;
9562     return true;
9563   }
9564 
9565   case ARM::VLD3LNdWB_fixed_Asm_8:
9566   case ARM::VLD3LNdWB_fixed_Asm_16:
9567   case ARM::VLD3LNdWB_fixed_Asm_32:
9568   case ARM::VLD3LNqWB_fixed_Asm_16:
9569   case ARM::VLD3LNqWB_fixed_Asm_32: {
9570     MCInst TmpInst;
9571     // Shuffle the operands around so the lane index operand is in the
9572     // right place.
9573     unsigned Spacing;
9574     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9575     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9576     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9577                                             Spacing));
9578     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9579                                             Spacing * 2));
9580     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9581     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9582     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9583     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9584     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9585     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9586                                             Spacing));
9587     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9588                                             Spacing * 2));
9589     TmpInst.addOperand(Inst.getOperand(1)); // lane
9590     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9591     TmpInst.addOperand(Inst.getOperand(5));
9592     Inst = TmpInst;
9593     return true;
9594   }
9595 
9596   case ARM::VLD4LNdWB_fixed_Asm_8:
9597   case ARM::VLD4LNdWB_fixed_Asm_16:
9598   case ARM::VLD4LNdWB_fixed_Asm_32:
9599   case ARM::VLD4LNqWB_fixed_Asm_16:
9600   case ARM::VLD4LNqWB_fixed_Asm_32: {
9601     MCInst TmpInst;
9602     // Shuffle the operands around so the lane index operand is in the
9603     // right place.
9604     unsigned Spacing;
9605     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9606     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9607     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9608                                             Spacing));
9609     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9610                                             Spacing * 2));
9611     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9612                                             Spacing * 3));
9613     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9614     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9615     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9616     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9617     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9618     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9619                                             Spacing));
9620     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9621                                             Spacing * 2));
9622     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9623                                             Spacing * 3));
9624     TmpInst.addOperand(Inst.getOperand(1)); // lane
9625     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9626     TmpInst.addOperand(Inst.getOperand(5));
9627     Inst = TmpInst;
9628     return true;
9629   }
9630 
9631   case ARM::VLD1LNdAsm_8:
9632   case ARM::VLD1LNdAsm_16:
9633   case ARM::VLD1LNdAsm_32: {
9634     MCInst TmpInst;
9635     // Shuffle the operands around so the lane index operand is in the
9636     // right place.
9637     unsigned Spacing;
9638     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9639     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9640     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9641     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9642     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9643     TmpInst.addOperand(Inst.getOperand(1)); // lane
9644     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9645     TmpInst.addOperand(Inst.getOperand(5));
9646     Inst = TmpInst;
9647     return true;
9648   }
9649 
9650   case ARM::VLD2LNdAsm_8:
9651   case ARM::VLD2LNdAsm_16:
9652   case ARM::VLD2LNdAsm_32:
9653   case ARM::VLD2LNqAsm_16:
9654   case ARM::VLD2LNqAsm_32: {
9655     MCInst TmpInst;
9656     // Shuffle the operands around so the lane index operand is in the
9657     // right place.
9658     unsigned Spacing;
9659     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9660     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9661     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9662                                             Spacing));
9663     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9664     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9665     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9666     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9667                                             Spacing));
9668     TmpInst.addOperand(Inst.getOperand(1)); // lane
9669     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9670     TmpInst.addOperand(Inst.getOperand(5));
9671     Inst = TmpInst;
9672     return true;
9673   }
9674 
9675   case ARM::VLD3LNdAsm_8:
9676   case ARM::VLD3LNdAsm_16:
9677   case ARM::VLD3LNdAsm_32:
9678   case ARM::VLD3LNqAsm_16:
9679   case ARM::VLD3LNqAsm_32: {
9680     MCInst TmpInst;
9681     // Shuffle the operands around so the lane index operand is in the
9682     // right place.
9683     unsigned Spacing;
9684     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9685     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9686     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9687                                             Spacing));
9688     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9689                                             Spacing * 2));
9690     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9691     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9692     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9693     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9694                                             Spacing));
9695     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9696                                             Spacing * 2));
9697     TmpInst.addOperand(Inst.getOperand(1)); // lane
9698     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9699     TmpInst.addOperand(Inst.getOperand(5));
9700     Inst = TmpInst;
9701     return true;
9702   }
9703 
9704   case ARM::VLD4LNdAsm_8:
9705   case ARM::VLD4LNdAsm_16:
9706   case ARM::VLD4LNdAsm_32:
9707   case ARM::VLD4LNqAsm_16:
9708   case ARM::VLD4LNqAsm_32: {
9709     MCInst TmpInst;
9710     // Shuffle the operands around so the lane index operand is in the
9711     // right place.
9712     unsigned Spacing;
9713     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9714     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9715     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9716                                             Spacing));
9717     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9718                                             Spacing * 2));
9719     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9720                                             Spacing * 3));
9721     TmpInst.addOperand(Inst.getOperand(2)); // Rn
9722     TmpInst.addOperand(Inst.getOperand(3)); // alignment
9723     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9724     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9725                                             Spacing));
9726     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9727                                             Spacing * 2));
9728     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9729                                             Spacing * 3));
9730     TmpInst.addOperand(Inst.getOperand(1)); // lane
9731     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9732     TmpInst.addOperand(Inst.getOperand(5));
9733     Inst = TmpInst;
9734     return true;
9735   }
9736 
9737   // VLD3DUP single 3-element structure to all lanes instructions.
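  // E.g. "vld3.8 {d0[], d1[], d2[]}, [r1]" loads one 3-element structure and
  // replicates it to every lane of the three listed registers.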
9738   case ARM::VLD3DUPdAsm_8:
9739   case ARM::VLD3DUPdAsm_16:
9740   case ARM::VLD3DUPdAsm_32:
9741   case ARM::VLD3DUPqAsm_8:
9742   case ARM::VLD3DUPqAsm_16:
9743   case ARM::VLD3DUPqAsm_32: {
9744     MCInst TmpInst;
9745     unsigned Spacing;
9746     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9747     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9748     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9749                                             Spacing));
9750     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9751                                             Spacing * 2));
9752     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9753     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9754     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9755     TmpInst.addOperand(Inst.getOperand(4));
9756     Inst = TmpInst;
9757     return true;
9758   }
9759 
9760   case ARM::VLD3DUPdWB_fixed_Asm_8:
9761   case ARM::VLD3DUPdWB_fixed_Asm_16:
9762   case ARM::VLD3DUPdWB_fixed_Asm_32:
9763   case ARM::VLD3DUPqWB_fixed_Asm_8:
9764   case ARM::VLD3DUPqWB_fixed_Asm_16:
9765   case ARM::VLD3DUPqWB_fixed_Asm_32: {
9766     MCInst TmpInst;
9767     unsigned Spacing;
9768     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9769     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9770     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9771                                             Spacing));
9772     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9773                                             Spacing * 2));
9774     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9775     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9776     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9777     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9778     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9779     TmpInst.addOperand(Inst.getOperand(4));
9780     Inst = TmpInst;
9781     return true;
9782   }
9783 
9784   case ARM::VLD3DUPdWB_register_Asm_8:
9785   case ARM::VLD3DUPdWB_register_Asm_16:
9786   case ARM::VLD3DUPdWB_register_Asm_32:
9787   case ARM::VLD3DUPqWB_register_Asm_8:
9788   case ARM::VLD3DUPqWB_register_Asm_16:
9789   case ARM::VLD3DUPqWB_register_Asm_32: {
9790     MCInst TmpInst;
9791     unsigned Spacing;
9792     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9793     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9794     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9795                                             Spacing));
9796     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9797                                             Spacing * 2));
9798     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9799     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9800     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9801     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9802     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9803     TmpInst.addOperand(Inst.getOperand(5));
9804     Inst = TmpInst;
9805     return true;
9806   }
9807 
9808   // VLD3 multiple 3-element structure instructions.
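  // E.g. "vld3.8 {d0, d1, d2}, [r0]"; the even-spaced form
  // "vld3.8 {d0, d2, d4}, [r0]" maps to the q variants with Spacing == 2.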
9809   case ARM::VLD3dAsm_8:
9810   case ARM::VLD3dAsm_16:
9811   case ARM::VLD3dAsm_32:
9812   case ARM::VLD3qAsm_8:
9813   case ARM::VLD3qAsm_16:
9814   case ARM::VLD3qAsm_32: {
9815     MCInst TmpInst;
9816     unsigned Spacing;
9817     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9818     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9819     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9820                                             Spacing));
9821     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9822                                             Spacing * 2));
9823     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9824     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9825     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9826     TmpInst.addOperand(Inst.getOperand(4));
9827     Inst = TmpInst;
9828     return true;
9829   }
9830 
9831   case ARM::VLD3dWB_fixed_Asm_8:
9832   case ARM::VLD3dWB_fixed_Asm_16:
9833   case ARM::VLD3dWB_fixed_Asm_32:
9834   case ARM::VLD3qWB_fixed_Asm_8:
9835   case ARM::VLD3qWB_fixed_Asm_16:
9836   case ARM::VLD3qWB_fixed_Asm_32: {
9837     MCInst TmpInst;
9838     unsigned Spacing;
9839     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9840     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9841     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9842                                             Spacing));
9843     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9844                                             Spacing * 2));
9845     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9846     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9847     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9848     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9849     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9850     TmpInst.addOperand(Inst.getOperand(4));
9851     Inst = TmpInst;
9852     return true;
9853   }
9854 
9855   case ARM::VLD3dWB_register_Asm_8:
9856   case ARM::VLD3dWB_register_Asm_16:
9857   case ARM::VLD3dWB_register_Asm_32:
9858   case ARM::VLD3qWB_register_Asm_8:
9859   case ARM::VLD3qWB_register_Asm_16:
9860   case ARM::VLD3qWB_register_Asm_32: {
9861     MCInst TmpInst;
9862     unsigned Spacing;
9863     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9864     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9865     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9866                                             Spacing));
9867     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9868                                             Spacing * 2));
9869     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9870     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9871     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9872     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9873     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9874     TmpInst.addOperand(Inst.getOperand(5));
9875     Inst = TmpInst;
9876     return true;
9877   }
9878 
9879   // VLD4DUP single 4-element structure to all lanes instructions.
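  // E.g. "vld4.8 {d0[], d1[], d2[], d3[]}, [r1]" loads one 4-element
  // structure and replicates it to every lane of the four listed registers.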
9880   case ARM::VLD4DUPdAsm_8:
9881   case ARM::VLD4DUPdAsm_16:
9882   case ARM::VLD4DUPdAsm_32:
9883   case ARM::VLD4DUPqAsm_8:
9884   case ARM::VLD4DUPqAsm_16:
9885   case ARM::VLD4DUPqAsm_32: {
9886     MCInst TmpInst;
9887     unsigned Spacing;
9888     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9889     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9890     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9891                                             Spacing));
9892     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9893                                             Spacing * 2));
9894     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9895                                             Spacing * 3));
9896     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9897     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9898     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9899     TmpInst.addOperand(Inst.getOperand(4));
9900     Inst = TmpInst;
9901     return true;
9902   }
9903 
9904   case ARM::VLD4DUPdWB_fixed_Asm_8:
9905   case ARM::VLD4DUPdWB_fixed_Asm_16:
9906   case ARM::VLD4DUPdWB_fixed_Asm_32:
9907   case ARM::VLD4DUPqWB_fixed_Asm_8:
9908   case ARM::VLD4DUPqWB_fixed_Asm_16:
9909   case ARM::VLD4DUPqWB_fixed_Asm_32: {
9910     MCInst TmpInst;
9911     unsigned Spacing;
9912     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9913     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9914     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9915                                             Spacing));
9916     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9917                                             Spacing * 2));
9918     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9919                                             Spacing * 3));
9920     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9921     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9922     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9923     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9924     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9925     TmpInst.addOperand(Inst.getOperand(4));
9926     Inst = TmpInst;
9927     return true;
9928   }
9929 
9930   case ARM::VLD4DUPdWB_register_Asm_8:
9931   case ARM::VLD4DUPdWB_register_Asm_16:
9932   case ARM::VLD4DUPdWB_register_Asm_32:
9933   case ARM::VLD4DUPqWB_register_Asm_8:
9934   case ARM::VLD4DUPqWB_register_Asm_16:
9935   case ARM::VLD4DUPqWB_register_Asm_32: {
9936     MCInst TmpInst;
9937     unsigned Spacing;
9938     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9939     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9940     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9941                                             Spacing));
9942     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9943                                             Spacing * 2));
9944     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9945                                             Spacing * 3));
9946     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9947     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9948     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9949     TmpInst.addOperand(Inst.getOperand(3)); // Rm
9950     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9951     TmpInst.addOperand(Inst.getOperand(5));
9952     Inst = TmpInst;
9953     return true;
9954   }
9955 
9956   // VLD4 multiple 4-element structure instructions.
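  // E.g. "vld4.8 {d0, d1, d2, d3}, [r0]".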
9957   case ARM::VLD4dAsm_8:
9958   case ARM::VLD4dAsm_16:
9959   case ARM::VLD4dAsm_32:
9960   case ARM::VLD4qAsm_8:
9961   case ARM::VLD4qAsm_16:
9962   case ARM::VLD4qAsm_32: {
9963     MCInst TmpInst;
9964     unsigned Spacing;
9965     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9966     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9967     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9968                                             Spacing));
9969     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9970                                             Spacing * 2));
9971     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9972                                             Spacing * 3));
9973     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9974     TmpInst.addOperand(Inst.getOperand(2)); // alignment
9975     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9976     TmpInst.addOperand(Inst.getOperand(4));
9977     Inst = TmpInst;
9978     return true;
9979   }
9980 
9981   case ARM::VLD4dWB_fixed_Asm_8:
9982   case ARM::VLD4dWB_fixed_Asm_16:
9983   case ARM::VLD4dWB_fixed_Asm_32:
9984   case ARM::VLD4qWB_fixed_Asm_8:
9985   case ARM::VLD4qWB_fixed_Asm_16:
9986   case ARM::VLD4qWB_fixed_Asm_32: {
9987     MCInst TmpInst;
9988     unsigned Spacing;
9989     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9990     TmpInst.addOperand(Inst.getOperand(0)); // Vd
9991     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9992                                             Spacing));
9993     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9994                                             Spacing * 2));
9995     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
9996                                             Spacing * 3));
9997     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9998     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9999     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10000     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10001     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10002     TmpInst.addOperand(Inst.getOperand(4));
10003     Inst = TmpInst;
10004     return true;
10005   }
10006 
10007   case ARM::VLD4dWB_register_Asm_8:
10008   case ARM::VLD4dWB_register_Asm_16:
10009   case ARM::VLD4dWB_register_Asm_32:
10010   case ARM::VLD4qWB_register_Asm_8:
10011   case ARM::VLD4qWB_register_Asm_16:
10012   case ARM::VLD4qWB_register_Asm_32: {
10013     MCInst TmpInst;
10014     unsigned Spacing;
10015     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10016     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10017     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10018                                             Spacing));
10019     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10020                                             Spacing * 2));
10021     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10022                                             Spacing * 3));
10023     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10024     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10025     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10026     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10027     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10028     TmpInst.addOperand(Inst.getOperand(5));
10029     Inst = TmpInst;
10030     return true;
10031   }
10032 
10033   // VST3 multiple 3-element structure instructions.
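  // E.g. "vst3.8 {d0, d1, d2}, [r0]".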
10034   case ARM::VST3dAsm_8:
10035   case ARM::VST3dAsm_16:
10036   case ARM::VST3dAsm_32:
10037   case ARM::VST3qAsm_8:
10038   case ARM::VST3qAsm_16:
10039   case ARM::VST3qAsm_32: {
10040     MCInst TmpInst;
10041     unsigned Spacing;
10042     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10043     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10044     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10045     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10046     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10047                                             Spacing));
10048     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10049                                             Spacing * 2));
10050     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10051     TmpInst.addOperand(Inst.getOperand(4));
10052     Inst = TmpInst;
10053     return true;
10054   }
10055 
10056   case ARM::VST3dWB_fixed_Asm_8:
10057   case ARM::VST3dWB_fixed_Asm_16:
10058   case ARM::VST3dWB_fixed_Asm_32:
10059   case ARM::VST3qWB_fixed_Asm_8:
10060   case ARM::VST3qWB_fixed_Asm_16:
10061   case ARM::VST3qWB_fixed_Asm_32: {
10062     MCInst TmpInst;
10063     unsigned Spacing;
10064     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10065     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10066     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10067     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10068     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10069     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10070     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10071                                             Spacing));
10072     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10073                                             Spacing * 2));
10074     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10075     TmpInst.addOperand(Inst.getOperand(4));
10076     Inst = TmpInst;
10077     return true;
10078   }
10079 
10080   case ARM::VST3dWB_register_Asm_8:
10081   case ARM::VST3dWB_register_Asm_16:
10082   case ARM::VST3dWB_register_Asm_32:
10083   case ARM::VST3qWB_register_Asm_8:
10084   case ARM::VST3qWB_register_Asm_16:
10085   case ARM::VST3qWB_register_Asm_32: {
10086     MCInst TmpInst;
10087     unsigned Spacing;
10088     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10089     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10090     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10091     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10092     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10093     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10094     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10095                                             Spacing));
10096     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10097                                             Spacing * 2));
10098     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10099     TmpInst.addOperand(Inst.getOperand(5));
10100     Inst = TmpInst;
10101     return true;
10102   }
10103 
10104   // VST4 multiple 4-element structure instructions.
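  // E.g. "vst4.8 {d0, d1, d2, d3}, [r0]".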
10105   case ARM::VST4dAsm_8:
10106   case ARM::VST4dAsm_16:
10107   case ARM::VST4dAsm_32:
10108   case ARM::VST4qAsm_8:
10109   case ARM::VST4qAsm_16:
10110   case ARM::VST4qAsm_32: {
10111     MCInst TmpInst;
10112     unsigned Spacing;
10113     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10114     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10115     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10116     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10117     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10118                                             Spacing));
10119     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10120                                             Spacing * 2));
10121     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10122                                             Spacing * 3));
10123     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10124     TmpInst.addOperand(Inst.getOperand(4));
10125     Inst = TmpInst;
10126     return true;
10127   }
10128 
10129   case ARM::VST4dWB_fixed_Asm_8:
10130   case ARM::VST4dWB_fixed_Asm_16:
10131   case ARM::VST4dWB_fixed_Asm_32:
10132   case ARM::VST4qWB_fixed_Asm_8:
10133   case ARM::VST4qWB_fixed_Asm_16:
10134   case ARM::VST4qWB_fixed_Asm_32: {
10135     MCInst TmpInst;
10136     unsigned Spacing;
10137     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10138     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10139     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10140     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10141     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10142     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10143     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10144                                             Spacing));
10145     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10146                                             Spacing * 2));
10147     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10148                                             Spacing * 3));
10149     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10150     TmpInst.addOperand(Inst.getOperand(4));
10151     Inst = TmpInst;
10152     return true;
10153   }
10154 
10155   case ARM::VST4dWB_register_Asm_8:
10156   case ARM::VST4dWB_register_Asm_16:
10157   case ARM::VST4dWB_register_Asm_32:
10158   case ARM::VST4qWB_register_Asm_8:
10159   case ARM::VST4qWB_register_Asm_16:
10160   case ARM::VST4qWB_register_Asm_32: {
10161     MCInst TmpInst;
10162     unsigned Spacing;
10163     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10164     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10165     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10166     TmpInst.addOperand(Inst.getOperand(2)); // alignment
10167     TmpInst.addOperand(Inst.getOperand(3)); // Rm
10168     TmpInst.addOperand(Inst.getOperand(0)); // Vd
10169     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10170                                             Spacing));
10171     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10172                                             Spacing * 2));
10173     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
10174                                             Spacing * 3));
10175     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10176     TmpInst.addOperand(Inst.getOperand(5));
10177     Inst = TmpInst;
10178     return true;
10179   }
10180 
10181   // Handle encoding choice for the shift-immediate instructions.
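  // When Rd and Rm are both low registers and the flag-setting behaviour
  // matches the IT context (flags set outside an IT block, not set inside
  // one), the 16-bit Thumb1 encoding is preferred over the 32-bit Thumb2
  // one; e.g. "lsls r0, r1, #2" outside an IT block narrows to tLSLri.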
10182   case ARM::t2LSLri:
10183   case ARM::t2LSRri:
10184   case ARM::t2ASRri:
10185     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10186         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10187         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10188         !HasWideQualifier) {
10189       unsigned NewOpc;
10190       switch (Inst.getOpcode()) {
10191       default: llvm_unreachable("unexpected opcode");
10192       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
10193       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
10194       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
10195       }
10196       // The Thumb1 operands aren't in the same order: cc_out comes second, not last.
10197       MCInst TmpInst;
10198       TmpInst.setOpcode(NewOpc);
10199       TmpInst.addOperand(Inst.getOperand(0));
10200       TmpInst.addOperand(Inst.getOperand(5));
10201       TmpInst.addOperand(Inst.getOperand(1));
10202       TmpInst.addOperand(Inst.getOperand(2));
10203       TmpInst.addOperand(Inst.getOperand(3));
10204       TmpInst.addOperand(Inst.getOperand(4));
10205       Inst = TmpInst;
10206       return true;
10207     }
10208     return false;
10209 
10210   // Handle the Thumb2 mode MOV complex aliases.
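  // Thumb2 has no register-shifted MOV encoding; e.g. "mov r0, r0, lsl r1"
  // is parsed as the t2MOVsr pseudo and expanded below into the equivalent
  // shift instruction (narrow tLSLrr or wide t2LSLrr, as appropriate).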
10211   case ARM::t2MOVsr:
10212   case ARM::t2MOVSsr: {
10213     // Which instruction to expand to depends on the CCOut operand,
10214     // whether we're in an IT block, and whether the register operands
10215     // are low registers.
10216     bool isNarrow = false;
10217     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10218         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10219         isARMLowRegister(Inst.getOperand(2).getReg()) &&
10220         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10221         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
10222         !HasWideQualifier)
10223       isNarrow = true;
10224     MCInst TmpInst;
10225     unsigned newOpc;
10226     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
10227     default: llvm_unreachable("unexpected opcode!");
10228     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
10229     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
10230     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
10231     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
10232     }
10233     TmpInst.setOpcode(newOpc);
10234     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10235     if (isNarrow)
10236       TmpInst.addOperand(MCOperand::createReg(
10237           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10238     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10239     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10240     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10241     TmpInst.addOperand(Inst.getOperand(5));
10242     if (!isNarrow)
10243       TmpInst.addOperand(MCOperand::createReg(
10244           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10245     Inst = TmpInst;
10246     return true;
10247   }
10248   case ARM::t2MOVsi:
10249   case ARM::t2MOVSsi: {
10250     // Which instruction to expand to depends on the CCOut operand,
10251     // whether we're in an IT block, and whether the register operands
10252     // are low registers.
10253     bool isNarrow = false;
10254     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10255         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10256         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10257         !HasWideQualifier)
10258       isNarrow = true;
10259     MCInst TmpInst;
10260     unsigned newOpc;
10261     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10262     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
10263     bool isMov = false;
10264     // MOV rd, rm, LSL #0 is actually a MOV instruction
10265     if (Shift == ARM_AM::lsl && Amount == 0) {
10266       isMov = true;
10267       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10268       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10269       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10270       // instead.
10271       if (inITBlock()) {
10272         isNarrow = false;
10273       }
10274       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10275     } else {
10276       switch(Shift) {
10277       default: llvm_unreachable("unexpected opcode!");
10278       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10279       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10280       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10281       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10282       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10283       }
10284     }
10285     if (Amount == 32) Amount = 0;
10286     TmpInst.setOpcode(newOpc);
10287     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10288     if (isNarrow && !isMov)
10289       TmpInst.addOperand(MCOperand::createReg(
10290           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10291     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10292     if (newOpc != ARM::t2RRX && !isMov)
10293       TmpInst.addOperand(MCOperand::createImm(Amount));
10294     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10295     TmpInst.addOperand(Inst.getOperand(4));
10296     if (!isNarrow)
10297       TmpInst.addOperand(MCOperand::createReg(
10298           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10299     Inst = TmpInst;
10300     return true;
10301   }
10302   // Handle the ARM mode MOV complex aliases.
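  // In ARM mode, "asr r0, r1, r2" and friends are UAL aliases for MOV with a
  // shifted-register operand, so they are rewritten into MOVsr/MOVsi here.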
10303   case ARM::ASRr:
10304   case ARM::LSRr:
10305   case ARM::LSLr:
10306   case ARM::RORr: {
10307     ARM_AM::ShiftOpc ShiftTy;
10308     switch(Inst.getOpcode()) {
10309     default: llvm_unreachable("unexpected opcode!");
10310     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10311     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10312     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10313     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10314     }
10315     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
10316     MCInst TmpInst;
10317     TmpInst.setOpcode(ARM::MOVsr);
10318     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10319     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10320     TmpInst.addOperand(Inst.getOperand(2)); // Rm
10321     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10322     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10323     TmpInst.addOperand(Inst.getOperand(4));
10324     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10325     Inst = TmpInst;
10326     return true;
10327   }
10328   case ARM::ASRi:
10329   case ARM::LSRi:
10330   case ARM::LSLi:
10331   case ARM::RORi: {
10332     ARM_AM::ShiftOpc ShiftTy;
10333     switch(Inst.getOpcode()) {
10334     default: llvm_unreachable("unexpected opcode!");
10335     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10336     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10337     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10338     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10339     }
10340     // A shift by zero is a plain MOVr, not a MOVsi.
10341     unsigned Amt = Inst.getOperand(2).getImm();
10342     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10343     // A shift by 32 should be encoded as 0 when permitted
10344     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10345       Amt = 0;
10346     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
10347     MCInst TmpInst;
10348     TmpInst.setOpcode(Opc);
10349     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10350     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10351     if (Opc == ARM::MOVsi)
10352       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10353     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10354     TmpInst.addOperand(Inst.getOperand(4));
10355     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10356     Inst = TmpInst;
10357     return true;
10358   }
10359   case ARM::RRXi: {
10360     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
10361     MCInst TmpInst;
10362     TmpInst.setOpcode(ARM::MOVsi);
10363     TmpInst.addOperand(Inst.getOperand(0)); // Rd
10364     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10365     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10366     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10367     TmpInst.addOperand(Inst.getOperand(3));
10368     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
10369     Inst = TmpInst;
10370     return true;
10371   }
10372   case ARM::t2LDMIA_UPD: {
10373     // If this is a load of a single register, then we should use
10374     // a post-indexed LDR instruction instead, per the ARM ARM.
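    // E.g. "ldmia r0!, {r1}" is emitted as "ldr r1, [r0], #4".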
10375     if (Inst.getNumOperands() != 5)
10376       return false;
10377     MCInst TmpInst;
10378     TmpInst.setOpcode(ARM::t2LDR_POST);
10379     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10380     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10381     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10382     TmpInst.addOperand(MCOperand::createImm(4));
10383     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10384     TmpInst.addOperand(Inst.getOperand(3));
10385     Inst = TmpInst;
10386     return true;
10387   }
10388   case ARM::t2STMDB_UPD: {
10389     // If this is a store of a single register, then we should use
10390     // a pre-indexed STR instruction instead, per the ARM ARM.
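    // E.g. "stmdb r0!, {r1}" is emitted as "str r1, [r0, #-4]!".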
10391     if (Inst.getNumOperands() != 5)
10392       return false;
10393     MCInst TmpInst;
10394     TmpInst.setOpcode(ARM::t2STR_PRE);
10395     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10396     TmpInst.addOperand(Inst.getOperand(4)); // Rt
10397     TmpInst.addOperand(Inst.getOperand(1)); // Rn
10398     TmpInst.addOperand(MCOperand::createImm(-4));
10399     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10400     TmpInst.addOperand(Inst.getOperand(3));
10401     Inst = TmpInst;
10402     return true;
10403   }
10404   case ARM::LDMIA_UPD:
10405     // If this is a load of a single register via a 'pop', then we should use
10406     // a post-indexed LDR instruction instead, per the ARM ARM.
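    // E.g. "pop {r0}" is emitted as "ldr r0, [sp], #4".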
10407     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10408         Inst.getNumOperands() == 5) {
10409       MCInst TmpInst;
10410       TmpInst.setOpcode(ARM::LDR_POST_IMM);
10411       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10412       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10413       TmpInst.addOperand(Inst.getOperand(1)); // Rn
10414       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
10415       TmpInst.addOperand(MCOperand::createImm(4));
10416       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10417       TmpInst.addOperand(Inst.getOperand(3));
10418       Inst = TmpInst;
10419       return true;
10420     }
10421     break;
10422   case ARM::STMDB_UPD:
10423     // If this is a store of a single register via a 'push', then we should use
10424     // a pre-indexed STR instruction instead, per the ARM ARM.
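    // E.g. "push {r0}" is emitted as "str r0, [sp, #-4]!".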
10425     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10426         Inst.getNumOperands() == 5) {
10427       MCInst TmpInst;
10428       TmpInst.setOpcode(ARM::STR_PRE_IMM);
10429       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10430       TmpInst.addOperand(Inst.getOperand(4)); // Rt
10431       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10432       TmpInst.addOperand(MCOperand::createImm(-4));
10433       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10434       TmpInst.addOperand(Inst.getOperand(3));
10435       Inst = TmpInst;
10436     }
10437     break;
10438   case ARM::t2ADDri12:
10439   case ARM::t2SUBri12:
10440   case ARM::t2ADDspImm12:
10441   case ARM::t2SUBspImm12: {
10442     // If the immediate fits for encoding T3 and the generic
10443     // mnemonic was used, encoding T3 is preferred.
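    // E.g. a generic "add r0, r1, #16" parsed as t2ADDri12 is switched to
    // t2ADDri here, while an explicit "addw r0, r1, #16" keeps the 12-bit
    // immediate encoding.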
10444     const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10445     if ((Token != "add" && Token != "sub") ||
10446         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10447       break;
10448     switch (Inst.getOpcode()) {
10449     case ARM::t2ADDri12:
10450       Inst.setOpcode(ARM::t2ADDri);
10451       break;
10452     case ARM::t2SUBri12:
10453       Inst.setOpcode(ARM::t2SUBri);
10454       break;
10455     case ARM::t2ADDspImm12:
10456       Inst.setOpcode(ARM::t2ADDspImm);
10457       break;
10458     case ARM::t2SUBspImm12:
10459       Inst.setOpcode(ARM::t2SUBspImm);
10460       break;
10461     }
10462 
10463     Inst.addOperand(MCOperand::createReg(0)); // cc_out
10464     return true;
10465   }
10466   case ARM::tADDi8:
10467     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10468     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10469     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10470     // to encoding T1 if <Rd> is omitted."
10471     if (Inst.getOperand(3).isImm() &&
10472         (unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10473       Inst.setOpcode(ARM::tADDi3);
10474       return true;
10475     }
10476     break;
10477   case ARM::tSUBi8:
10478     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
10479     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10480     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10481     // to encoding T1 if <Rd> is omitted."
10482     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10483       Inst.setOpcode(ARM::tSUBi3);
10484       return true;
10485     }
10486     break;
10487   case ARM::t2ADDri:
10488   case ARM::t2SUBri: {
10489     // If the destination and first source operand are the same, and
10490     // the flags are compatible with the current IT status, use encoding
10491     // T2 instead of T3, for compatibility with the system 'as'. Make
10492     // sure the wide encoding wasn't explicitly requested.
10493     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
10494         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
10495         (Inst.getOperand(2).isImm() &&
10496          (unsigned)Inst.getOperand(2).getImm() > 255) ||
10497         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
10498         HasWideQualifier)
10499       break;
10500     MCInst TmpInst;
10501     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10502                       ARM::tADDi8 : ARM::tSUBi8);
10503     TmpInst.addOperand(Inst.getOperand(0));
10504     TmpInst.addOperand(Inst.getOperand(5));
10505     TmpInst.addOperand(Inst.getOperand(0));
10506     TmpInst.addOperand(Inst.getOperand(2));
10507     TmpInst.addOperand(Inst.getOperand(3));
10508     TmpInst.addOperand(Inst.getOperand(4));
10509     Inst = TmpInst;
10510     return true;
10511   }
10512   case ARM::t2ADDspImm:
10513   case ARM::t2SUBspImm: {
10514     // Prefer T1 encoding if possible
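    // The 16-bit SP-relative encoding stores the immediate as a multiple of 4
    // in a 7-bit field, hence the alignment/range checks and the division by 4.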
10515     if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier)
10516       break;
10517     unsigned V = Inst.getOperand(2).getImm();
10518     if (V & 3 || V > ((1 << 7) - 1) << 2)
10519       break;
10520     MCInst TmpInst;
10521     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10522                                                           : ARM::tSUBspi);
10523     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10524     TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10525     TmpInst.addOperand(MCOperand::createImm(V / 4));   // immediate
10526     TmpInst.addOperand(Inst.getOperand(3));            // pred
10527     TmpInst.addOperand(Inst.getOperand(4));
10528     Inst = TmpInst;
10529     return true;
10530   }
10531   case ARM::t2ADDrr: {
10532     // If the destination and first source operand are the same, and
10533     // there's no setting of the flags, use encoding T2 instead of T3.
10534     // Note that this is only for ADD, not SUB. This mirrors the system
10535     // 'as' behaviour.  Also take advantage of ADD being commutative.
10536     // Make sure the wide encoding wasn't explicit.
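    // For example, an "add r0, r0, r8" or "add r0, r8, r0" that matched the
    // wide form can narrow to the 16-bit tADDhirr here, as long as the flags
    // aren't being set.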
10537     bool Swap = false;
10538     auto DestReg = Inst.getOperand(0).getReg();
10539     bool Transform = DestReg == Inst.getOperand(1).getReg();
10540     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10541       Transform = true;
10542       Swap = true;
10543     }
10544     if (!Transform ||
10545         Inst.getOperand(5).getReg() != 0 ||
10546         HasWideQualifier)
10547       break;
10548     MCInst TmpInst;
10549     TmpInst.setOpcode(ARM::tADDhirr);
10550     TmpInst.addOperand(Inst.getOperand(0));
10551     TmpInst.addOperand(Inst.getOperand(0));
10552     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10553     TmpInst.addOperand(Inst.getOperand(3));
10554     TmpInst.addOperand(Inst.getOperand(4));
10555     Inst = TmpInst;
10556     return true;
10557   }
10558   case ARM::tADDrSP:
10559     // If the non-SP source operand and the destination operand are not the
10560     // same, we need to use the 32-bit encoding if it's available.
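    // The 16-bit form is "add <Rdm>, sp, <Rdm>", so e.g. "add r1, sp, r2"
    // has to become a 32-bit t2ADDrr.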
10561     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10562       Inst.setOpcode(ARM::t2ADDrr);
10563       Inst.addOperand(MCOperand::createReg(0)); // cc_out
10564       return true;
10565     }
10566     break;
10567   case ARM::tB:
10568     // A Thumb conditional branch outside of an IT block is a tBcc.
10569     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10570       Inst.setOpcode(ARM::tBcc);
10571       return true;
10572     }
10573     break;
10574   case ARM::t2B:
10575     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10576     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10577       Inst.setOpcode(ARM::t2Bcc);
10578       return true;
10579     }
10580     break;
10581   case ARM::t2Bcc:
10582     // If the conditional is AL or we're in an IT block, we really want t2B.
10583     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10584       Inst.setOpcode(ARM::t2B);
10585       return true;
10586     }
10587     break;
10588   case ARM::tBcc:
10589     // If the conditional is AL, we really want tB.
10590     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10591       Inst.setOpcode(ARM::tB);
10592       return true;
10593     }
10594     break;
10595   case ARM::tLDMIA: {
10596     // If the register list contains any high registers, or if the writeback
10597     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10598     // instead if we're in Thumb2. Otherwise, this should have generated
10599     // an error in validateInstruction().
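    // For example, "ldmia r0!, {r1, r2}" fits the 16-bit encoding, but
    // "ldmia r0, {r1, r2}" (base not in the list and no writeback) or any
    // list containing a high register does not.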
10600     unsigned Rn = Inst.getOperand(0).getReg();
10601     bool hasWritebackToken =
10602         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
10603          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
10604     bool listContainsBase;
10605     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
10606         (!listContainsBase && !hasWritebackToken) ||
10607         (listContainsBase && hasWritebackToken)) {
10608       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10609       assert(isThumbTwo());
10610       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10611       // If we're switching to the updating version, we need to insert
10612       // the writeback tied operand.
10613       if (hasWritebackToken)
10614         Inst.insert(Inst.begin(),
10615                     MCOperand::createReg(Inst.getOperand(0).getReg()));
10616       return true;
10617     }
10618     break;
10619   }
10620   case ARM::tSTMIA_UPD: {
10621     // If the register list contains any high registers, we need to use
10622     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10623     // should have generated an error in validateInstruction().
10624     unsigned Rn = Inst.getOperand(0).getReg();
10625     bool listContainsBase;
10626     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
10627       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10628       assert(isThumbTwo());
10629       Inst.setOpcode(ARM::t2STMIA_UPD);
10630       return true;
10631     }
10632     break;
10633   }
10634   case ARM::tPOP: {
10635     bool listContainsBase;
10636     // If the register list contains any high registers, we need to use
10637     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10638     // should have generated an error in validateInstruction().
10639     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
10640       return false;
10641     assert(isThumbTwo());
10642     Inst.setOpcode(ARM::t2LDMIA_UPD);
10643     // Add the base register and writeback operands.
10644     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10645     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10646     return true;
10647   }
10648   case ARM::tPUSH: {
10649     bool listContainsBase;
10650     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
10651       return false;
10652     assert(isThumbTwo());
10653     Inst.setOpcode(ARM::t2STMDB_UPD);
10654     // Add the base register and writeback operands.
10655     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10656     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10657     return true;
10658   }
10659   case ARM::t2MOVi:
10660     // If we can use the 16-bit encoding and the user didn't explicitly
10661     // request the 32-bit variant, transform it here.
10662     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10663         (Inst.getOperand(1).isImm() &&
10664          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10665         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10666         !HasWideQualifier) {
10667       // The operands aren't in the same order for tMOVi8...
10668       MCInst TmpInst;
10669       TmpInst.setOpcode(ARM::tMOVi8);
10670       TmpInst.addOperand(Inst.getOperand(0));
10671       TmpInst.addOperand(Inst.getOperand(4));
10672       TmpInst.addOperand(Inst.getOperand(1));
10673       TmpInst.addOperand(Inst.getOperand(2));
10674       TmpInst.addOperand(Inst.getOperand(3));
10675       Inst = TmpInst;
10676       return true;
10677     }
10678     break;
10679 
10680   case ARM::t2MOVr:
10681     // If we can use the 16-bit encoding and the user didn't explicitly
10682     // request the 32-bit variant, transform it here.
10683     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10684         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10685         Inst.getOperand(2).getImm() == ARMCC::AL &&
10686         Inst.getOperand(4).getReg() == ARM::CPSR &&
10687         !HasWideQualifier) {
10688       // The operands aren't the same for tMOV[S]r... (no cc_out)
10689       MCInst TmpInst;
10690       unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10691       TmpInst.setOpcode(Op);
10692       TmpInst.addOperand(Inst.getOperand(0));
10693       TmpInst.addOperand(Inst.getOperand(1));
10694       if (Op == ARM::tMOVr) {
10695         TmpInst.addOperand(Inst.getOperand(2));
10696         TmpInst.addOperand(Inst.getOperand(3));
10697       }
10698       Inst = TmpInst;
10699       return true;
10700     }
10701     break;
10702 
10703   case ARM::t2SXTH:
10704   case ARM::t2SXTB:
10705   case ARM::t2UXTH:
10706   case ARM::t2UXTB:
10707     // If we can use the 16-bit encoding and the user didn't explicitly
10708     // request the 32-bit variant, transform it here.
10709     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10710         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10711         Inst.getOperand(2).getImm() == 0 &&
10712         !HasWideQualifier) {
10713       unsigned NewOpc;
10714       switch (Inst.getOpcode()) {
10715       default: llvm_unreachable("Illegal opcode!");
10716       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10717       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10718       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10719       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10720       }
10721       // The operands aren't the same for thumb1 (no rotate operand).
10722       MCInst TmpInst;
10723       TmpInst.setOpcode(NewOpc);
10724       TmpInst.addOperand(Inst.getOperand(0));
10725       TmpInst.addOperand(Inst.getOperand(1));
10726       TmpInst.addOperand(Inst.getOperand(3));
10727       TmpInst.addOperand(Inst.getOperand(4));
10728       Inst = TmpInst;
10729       return true;
10730     }
10731     break;
10732 
10733   case ARM::MOVsi: {
10734     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
    // rrx shifts and asr/lsr of #32 are encoded as 0
10736     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10737       return false;
10738     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10739       // Shifting by zero is accepted as a vanilla 'MOVr'
10740       MCInst TmpInst;
10741       TmpInst.setOpcode(ARM::MOVr);
10742       TmpInst.addOperand(Inst.getOperand(0));
10743       TmpInst.addOperand(Inst.getOperand(1));
10744       TmpInst.addOperand(Inst.getOperand(3));
10745       TmpInst.addOperand(Inst.getOperand(4));
10746       TmpInst.addOperand(Inst.getOperand(5));
10747       Inst = TmpInst;
10748       return true;
10749     }
10750     return false;
10751   }
10752   case ARM::ANDrsi:
10753   case ARM::ORRrsi:
10754   case ARM::EORrsi:
10755   case ARM::BICrsi:
10756   case ARM::SUBrsi:
10757   case ARM::ADDrsi: {
10758     unsigned newOpc;
10759     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
10760     if (SOpc == ARM_AM::rrx) return false;
10761     switch (Inst.getOpcode()) {
10762     default: llvm_unreachable("unexpected opcode!");
10763     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
10764     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
10765     case ARM::EORrsi: newOpc = ARM::EORrr; break;
10766     case ARM::BICrsi: newOpc = ARM::BICrr; break;
10767     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
10768     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
10769     }
10770     // If the shift is by zero, use the non-shifted instruction definition.
10771     // The exception is for right shifts, where 0 == 32
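    // For example, "and r0, r1, r2, lsl #0" becomes ANDrr, whereas
    // "and r0, r1, r2, lsr #32" is encoded with a shift amount of 0 and
    // keeps the shifted form.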
10772     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
10773         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
10774       MCInst TmpInst;
10775       TmpInst.setOpcode(newOpc);
10776       TmpInst.addOperand(Inst.getOperand(0));
10777       TmpInst.addOperand(Inst.getOperand(1));
10778       TmpInst.addOperand(Inst.getOperand(2));
10779       TmpInst.addOperand(Inst.getOperand(4));
10780       TmpInst.addOperand(Inst.getOperand(5));
10781       TmpInst.addOperand(Inst.getOperand(6));
10782       Inst = TmpInst;
10783       return true;
10784     }
10785     return false;
10786   }
10787   case ARM::ITasm:
10788   case ARM::t2IT: {
10789     // Set up the IT block state according to the IT instruction we just
10790     // matched.
10791     assert(!inITBlock() && "nested IT blocks?!");
10792     startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
10793                          Inst.getOperand(1).getImm());
10794     break;
10795   }
10796   case ARM::t2LSLrr:
10797   case ARM::t2LSRrr:
10798   case ARM::t2ASRrr:
10799   case ARM::t2SBCrr:
10800   case ARM::t2RORrr:
10801   case ARM::t2BICrr:
    // Assemblers should use the narrow encodings of these instructions
    // when permissible.
10803     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10804          isARMLowRegister(Inst.getOperand(2).getReg())) &&
10805         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10806         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10807         !HasWideQualifier) {
10808       unsigned NewOpc;
10809       switch (Inst.getOpcode()) {
10810         default: llvm_unreachable("unexpected opcode");
10811         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
10812         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
10813         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
10814         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
10815         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
10816         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
10817       }
10818       MCInst TmpInst;
10819       TmpInst.setOpcode(NewOpc);
10820       TmpInst.addOperand(Inst.getOperand(0));
10821       TmpInst.addOperand(Inst.getOperand(5));
10822       TmpInst.addOperand(Inst.getOperand(1));
10823       TmpInst.addOperand(Inst.getOperand(2));
10824       TmpInst.addOperand(Inst.getOperand(3));
10825       TmpInst.addOperand(Inst.getOperand(4));
10826       Inst = TmpInst;
10827       return true;
10828     }
10829     return false;
10830 
10831   case ARM::t2ANDrr:
10832   case ARM::t2EORrr:
10833   case ARM::t2ADCrr:
10834   case ARM::t2ORRrr:
    // Assemblers should use the narrow encodings of these instructions
    // when permissible. These instructions are special in that they are
    // commutable, so shorter encodings are available more often.
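    // For example, outside an IT block "ands r0, r1, r0" can still narrow to
    // tAND by swapping the two source operands, which the non-commutable
    // cases above cannot do.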
10838     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10839          isARMLowRegister(Inst.getOperand(2).getReg())) &&
10840         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
10841          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
10842         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10843         !HasWideQualifier) {
10844       unsigned NewOpc;
10845       switch (Inst.getOpcode()) {
10846         default: llvm_unreachable("unexpected opcode");
10847         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
10848         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
10849         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
10850         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
10851       }
10852       MCInst TmpInst;
10853       TmpInst.setOpcode(NewOpc);
10854       TmpInst.addOperand(Inst.getOperand(0));
10855       TmpInst.addOperand(Inst.getOperand(5));
10856       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
10857         TmpInst.addOperand(Inst.getOperand(1));
10858         TmpInst.addOperand(Inst.getOperand(2));
10859       } else {
10860         TmpInst.addOperand(Inst.getOperand(2));
10861         TmpInst.addOperand(Inst.getOperand(1));
10862       }
10863       TmpInst.addOperand(Inst.getOperand(3));
10864       TmpInst.addOperand(Inst.getOperand(4));
10865       Inst = TmpInst;
10866       return true;
10867     }
10868     return false;
10869   case ARM::MVE_VPST:
10870   case ARM::MVE_VPTv16i8:
10871   case ARM::MVE_VPTv8i16:
10872   case ARM::MVE_VPTv4i32:
10873   case ARM::MVE_VPTv16u8:
10874   case ARM::MVE_VPTv8u16:
10875   case ARM::MVE_VPTv4u32:
10876   case ARM::MVE_VPTv16s8:
10877   case ARM::MVE_VPTv8s16:
10878   case ARM::MVE_VPTv4s32:
10879   case ARM::MVE_VPTv4f32:
10880   case ARM::MVE_VPTv8f16:
10881   case ARM::MVE_VPTv16i8r:
10882   case ARM::MVE_VPTv8i16r:
10883   case ARM::MVE_VPTv4i32r:
10884   case ARM::MVE_VPTv16u8r:
10885   case ARM::MVE_VPTv8u16r:
10886   case ARM::MVE_VPTv4u32r:
10887   case ARM::MVE_VPTv16s8r:
10888   case ARM::MVE_VPTv8s16r:
10889   case ARM::MVE_VPTv4s32r:
10890   case ARM::MVE_VPTv4f32r:
10891   case ARM::MVE_VPTv8f16r: {
10892     assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
10893     MCOperand &MO = Inst.getOperand(0);
10894     VPTState.Mask = MO.getImm();
10895     VPTState.CurPosition = 0;
10896     break;
10897   }
10898   }
10899   return false;
10900 }
10901 
10902 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
10903   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
10904   // suffix depending on whether they're in an IT block or not.
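  // For example, the 16-bit "add r0, r1, #1" must be written "adds" outside
  // an IT block, and without the 's' when inside one.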
10905   unsigned Opc = Inst.getOpcode();
10906   const MCInstrDesc &MCID = MII.get(Opc);
10907   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
10908     assert(MCID.hasOptionalDef() &&
10909            "optionally flag setting instruction missing optional def operand");
10910     assert(MCID.NumOperands == Inst.getNumOperands() &&
10911            "operand count mismatch!");
10912     // Find the optional-def operand (cc_out).
10913     unsigned OpNo;
10914     for (OpNo = 0;
10915          OpNo < MCID.NumOperands && !MCID.operands()[OpNo].isOptionalDef();
10916          ++OpNo)
10917       ;
10918     // If we're parsing Thumb1, reject it completely.
10919     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
10920       return Match_RequiresFlagSetting;
10921     // If we're parsing Thumb2, which form is legal depends on whether we're
10922     // in an IT block.
10923     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
10924         !inITBlock())
10925       return Match_RequiresITBlock;
10926     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
10927         inITBlock())
10928       return Match_RequiresNotITBlock;
10929     // LSL with zero immediate is not allowed in an IT block
10930     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
10931       return Match_RequiresNotITBlock;
10932   } else if (isThumbOne()) {
    // Some Thumb1 encodings that support high registers only allow both
    // registers to be from r0-r7 when Thumb2 is available.
10935     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
10936         isARMLowRegister(Inst.getOperand(1).getReg()) &&
10937         isARMLowRegister(Inst.getOperand(2).getReg()))
10938       return Match_RequiresThumb2;
10939     // Others only require ARMv6 or later.
10940     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
10941              isARMLowRegister(Inst.getOperand(0).getReg()) &&
10942              isARMLowRegister(Inst.getOperand(1).getReg()))
10943       return Match_RequiresV6;
10944   }
10945 
10946   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
10947   // than the loop below can handle, so it uses the GPRnopc register class and
10948   // we do SP handling here.
10949   if (Opc == ARM::t2MOVr && !hasV8Ops())
10950   {
10951     // SP as both source and destination is not allowed
10952     if (Inst.getOperand(0).getReg() == ARM::SP &&
10953         Inst.getOperand(1).getReg() == ARM::SP)
10954       return Match_RequiresV8;
    // When flag-setting, SP is not allowed as either the source or destination
10956     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
10957         (Inst.getOperand(0).getReg() == ARM::SP ||
10958          Inst.getOperand(1).getReg() == ARM::SP))
10959       return Match_RequiresV8;
10960   }
10961 
10962   switch (Inst.getOpcode()) {
10963   case ARM::VMRS:
10964   case ARM::VMSR:
10965   case ARM::VMRS_FPCXTS:
10966   case ARM::VMRS_FPCXTNS:
10967   case ARM::VMSR_FPCXTS:
10968   case ARM::VMSR_FPCXTNS:
10969   case ARM::VMRS_FPSCR_NZCVQC:
10970   case ARM::VMSR_FPSCR_NZCVQC:
10971   case ARM::FMSTAT:
10972   case ARM::VMRS_VPR:
10973   case ARM::VMRS_P0:
10974   case ARM::VMSR_VPR:
10975   case ARM::VMSR_P0:
    // Use of SP for VMRS/VMSR is only allowed in ARM mode, or in Thumb mode
    // from ARMv8-A onwards.
10978     if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
10979         (isThumb() && !hasV8Ops()))
10980       return Match_InvalidOperand;
10981     break;
10982   case ARM::t2TBB:
10983   case ARM::t2TBH:
10984     // Rn = sp is only allowed with ARMv8-A
10985     if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
10986       return Match_RequiresV8;
10987     break;
10988   default:
10989     break;
10990   }
10991 
10992   for (unsigned I = 0; I < MCID.NumOperands; ++I)
10993     if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
10994       // rGPRRegClass excludes PC, and also excluded SP before ARMv8
10995       const auto &Op = Inst.getOperand(I);
10996       if (!Op.isReg()) {
10997         // This can happen in awkward cases with tied operands, e.g. a
10998         // writeback load/store with a complex addressing mode in
10999         // which there's an output operand corresponding to the
11000         // updated written-back base register: the Tablegen-generated
11001         // AsmMatcher will have written a placeholder operand to that
11002         // slot in the form of an immediate 0, because it can't
11003         // generate the register part of the complex addressing-mode
11004         // operand ahead of time.
11005         continue;
11006       }
11007 
11008       unsigned Reg = Op.getReg();
11009       if ((Reg == ARM::SP) && !hasV8Ops())
11010         return Match_RequiresV8;
11011       else if (Reg == ARM::PC)
11012         return Match_InvalidOperand;
11013     }
11014 
11015   return Match_Success;
11016 }
11017 
11018 namespace llvm {
11019 
11020 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
11021   return true; // In an assembly source, no need to second-guess
11022 }
11023 
11024 } // end namespace llvm
11025 
// Returns true if Inst is unpredictable if it is in an IT block, but is not
// the last instruction in the block.
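// For example, a "b", "bx lr" or "pop {..., pc}" inside an IT block is only
// allowed as the last instruction of that block.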
11028 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
11029   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11030 
11031   // All branch & call instructions terminate IT blocks with the exception of
11032   // SVC.
11033   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
11034       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
11035     return true;
11036 
11037   // Any arithmetic instruction which writes to the PC also terminates the IT
11038   // block.
11039   if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
11040     return true;
11041 
11042   return false;
11043 }
11044 
11045 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
11046                                           SmallVectorImpl<NearMissInfo> &NearMisses,
11047                                           bool MatchingInlineAsm,
11048                                           bool &EmitInITBlock,
11049                                           MCStreamer &Out) {
11050   // If we can't use an implicit IT block here, just match as normal.
11051   if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11052     return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11053 
11054   // Try to match the instruction in an extension of the current IT block (if
11055   // there is one).
11056   if (inImplicitITBlock()) {
11057     extendImplicitITBlock(ITState.Cond);
11058     if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11059             Match_Success) {
      // The match succeeded, but we still have to check that the instruction
      // is valid in this implicit IT block.
11062       const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11063       if (MCID.isPredicable()) {
11064         ARMCC::CondCodes InstCond =
11065             (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11066                 .getImm();
11067         ARMCC::CondCodes ITCond = currentITCond();
11068         if (InstCond == ITCond) {
11069           EmitInITBlock = true;
11070           return Match_Success;
11071         } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
11072           invertCurrentITCondition();
11073           EmitInITBlock = true;
11074           return Match_Success;
11075         }
11076       }
11077     }
11078     rewindImplicitITPosition();
11079   }
11080 
11081   // Finish the current IT block, and try to match outside any IT block.
11082   flushPendingInstructions(Out);
11083   unsigned PlainMatchResult =
11084       MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11085   if (PlainMatchResult == Match_Success) {
11086     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11087     if (MCID.isPredicable()) {
11088       ARMCC::CondCodes InstCond =
11089           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11090               .getImm();
11091       // Some forms of the branch instruction have their own condition code
11092       // fields, so can be conditionally executed without an IT block.
11093       if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
11094         EmitInITBlock = false;
11095         return Match_Success;
11096       }
11097       if (InstCond == ARMCC::AL) {
11098         EmitInITBlock = false;
11099         return Match_Success;
11100       }
11101     } else {
11102       EmitInITBlock = false;
11103       return Match_Success;
11104     }
11105   }
11106 
11107   // Try to match in a new IT block. The matcher doesn't check the actual
11108   // condition, so we create an IT block with a dummy condition, and fix it up
11109   // once we know the actual condition.
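  // For example, with implicit IT generation enabled, a bare "moveq r0, #1"
  // in Thumb mode is matched as if an "it eq" had been written before it.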
11110   startImplicitITBlock();
11111   if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11112       Match_Success) {
11113     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11114     if (MCID.isPredicable()) {
11115       ITState.Cond =
11116           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
11117               .getImm();
11118       EmitInITBlock = true;
11119       return Match_Success;
11120     }
11121   }
11122   discardImplicitITBlock();
11123 
11124   // If none of these succeed, return the error we got when trying to match
11125   // outside any IT blocks.
11126   EmitInITBlock = false;
11127   return PlainMatchResult;
11128 }
11129 
11130 static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11131                                          unsigned VariantID = 0);
11132 
11133 static const char *getSubtargetFeatureName(uint64_t Val);
11134 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
11135                                            OperandVector &Operands,
11136                                            MCStreamer &Out, uint64_t &ErrorInfo,
11137                                            bool MatchingInlineAsm) {
11138   MCInst Inst;
11139   unsigned MatchResult;
11140   bool PendConditionalInstruction = false;
11141 
11142   SmallVector<NearMissInfo, 4> NearMisses;
11143   MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
11144                                  PendConditionalInstruction, Out);
11145 
11146   switch (MatchResult) {
11147   case Match_Success:
11148     LLVM_DEBUG(dbgs() << "Parsed as: ";
11149                Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11150                dbgs() << "\n");
11151 
11152     // Context sensitive operand constraints aren't handled by the matcher,
11153     // so check them here.
11154     if (validateInstruction(Inst, Operands)) {
11155       // Still progress the IT block, otherwise one wrong condition causes
11156       // nasty cascading errors.
11157       forwardITPosition();
11158       forwardVPTPosition();
11159       return true;
11160     }
11161 
11162     {
11163       // Some instructions need post-processing to, for example, tweak which
11164       // encoding is selected. Loop on it while changes happen so the
11165       // individual transformations can chain off each other. E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
11167       while (processInstruction(Inst, Operands, Out))
11168         LLVM_DEBUG(dbgs() << "Changed to: ";
11169                    Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11170                    dbgs() << "\n");
11171     }
11172 
11173     // Only move forward at the very end so that everything in validate
11174     // and process gets a consistent answer about whether we're in an IT
11175     // block.
11176     forwardITPosition();
11177     forwardVPTPosition();
11178 
11179     // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
11180     // doesn't actually encode.
11181     if (Inst.getOpcode() == ARM::ITasm)
11182       return false;
11183 
11184     Inst.setLoc(IDLoc);
11185     if (PendConditionalInstruction) {
11186       PendingConditionalInsts.push_back(Inst);
11187       if (isITBlockFull() || isITBlockTerminator(Inst))
11188         flushPendingInstructions(Out);
11189     } else {
11190       Out.emitInstruction(Inst, getSTI());
11191     }
11192     return false;
11193   case Match_NearMisses:
11194     ReportNearMisses(NearMisses, IDLoc, Operands);
11195     return true;
11196   case Match_MnemonicFail: {
11197     FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11198     std::string Suggestion = ARMMnemonicSpellCheck(
11199       ((ARMOperand &)*Operands[0]).getToken(), FBS);
11200     return Error(IDLoc, "invalid instruction" + Suggestion,
11201                  ((ARMOperand &)*Operands[0]).getLocRange());
11202   }
11203   }
11204 
11205   llvm_unreachable("Implement any new match types added!");
11206 }
11207 
/// parseDirective parses the ARM-specific directives.
11209 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
11210   const MCContext::Environment Format = getContext().getObjectFileType();
11211   bool IsMachO = Format == MCContext::IsMachO;
11212   bool IsCOFF = Format == MCContext::IsCOFF;
11213 
11214   std::string IDVal = DirectiveID.getIdentifier().lower();
11215   if (IDVal == ".word")
11216     parseLiteralValues(4, DirectiveID.getLoc());
11217   else if (IDVal == ".short" || IDVal == ".hword")
11218     parseLiteralValues(2, DirectiveID.getLoc());
11219   else if (IDVal == ".thumb")
11220     parseDirectiveThumb(DirectiveID.getLoc());
11221   else if (IDVal == ".arm")
11222     parseDirectiveARM(DirectiveID.getLoc());
11223   else if (IDVal == ".thumb_func")
11224     parseDirectiveThumbFunc(DirectiveID.getLoc());
11225   else if (IDVal == ".code")
11226     parseDirectiveCode(DirectiveID.getLoc());
11227   else if (IDVal == ".syntax")
11228     parseDirectiveSyntax(DirectiveID.getLoc());
11229   else if (IDVal == ".unreq")
11230     parseDirectiveUnreq(DirectiveID.getLoc());
11231   else if (IDVal == ".fnend")
11232     parseDirectiveFnEnd(DirectiveID.getLoc());
11233   else if (IDVal == ".cantunwind")
11234     parseDirectiveCantUnwind(DirectiveID.getLoc());
11235   else if (IDVal == ".personality")
11236     parseDirectivePersonality(DirectiveID.getLoc());
11237   else if (IDVal == ".handlerdata")
11238     parseDirectiveHandlerData(DirectiveID.getLoc());
11239   else if (IDVal == ".setfp")
11240     parseDirectiveSetFP(DirectiveID.getLoc());
11241   else if (IDVal == ".pad")
11242     parseDirectivePad(DirectiveID.getLoc());
11243   else if (IDVal == ".save")
11244     parseDirectiveRegSave(DirectiveID.getLoc(), false);
11245   else if (IDVal == ".vsave")
11246     parseDirectiveRegSave(DirectiveID.getLoc(), true);
11247   else if (IDVal == ".ltorg" || IDVal == ".pool")
11248     parseDirectiveLtorg(DirectiveID.getLoc());
11249   else if (IDVal == ".even")
11250     parseDirectiveEven(DirectiveID.getLoc());
11251   else if (IDVal == ".personalityindex")
11252     parseDirectivePersonalityIndex(DirectiveID.getLoc());
11253   else if (IDVal == ".unwind_raw")
11254     parseDirectiveUnwindRaw(DirectiveID.getLoc());
11255   else if (IDVal == ".movsp")
11256     parseDirectiveMovSP(DirectiveID.getLoc());
11257   else if (IDVal == ".arch_extension")
11258     parseDirectiveArchExtension(DirectiveID.getLoc());
11259   else if (IDVal == ".align")
11260     return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
11261   else if (IDVal == ".thumb_set")
11262     parseDirectiveThumbSet(DirectiveID.getLoc());
11263   else if (IDVal == ".inst")
11264     parseDirectiveInst(DirectiveID.getLoc());
11265   else if (IDVal == ".inst.n")
11266     parseDirectiveInst(DirectiveID.getLoc(), 'n');
11267   else if (IDVal == ".inst.w")
11268     parseDirectiveInst(DirectiveID.getLoc(), 'w');
11269   else if (!IsMachO && !IsCOFF) {
11270     if (IDVal == ".arch")
11271       parseDirectiveArch(DirectiveID.getLoc());
11272     else if (IDVal == ".cpu")
11273       parseDirectiveCPU(DirectiveID.getLoc());
11274     else if (IDVal == ".eabi_attribute")
11275       parseDirectiveEabiAttr(DirectiveID.getLoc());
11276     else if (IDVal == ".fpu")
11277       parseDirectiveFPU(DirectiveID.getLoc());
11278     else if (IDVal == ".fnstart")
11279       parseDirectiveFnStart(DirectiveID.getLoc());
11280     else if (IDVal == ".object_arch")
11281       parseDirectiveObjectArch(DirectiveID.getLoc());
11282     else if (IDVal == ".tlsdescseq")
11283       parseDirectiveTLSDescSeq(DirectiveID.getLoc());
11284     else
11285       return true;
11286   } else if (IsCOFF) {
11287     if (IDVal == ".seh_stackalloc")
11288       parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/false);
11289     else if (IDVal == ".seh_stackalloc_w")
11290       parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/true);
11291     else if (IDVal == ".seh_save_regs")
11292       parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/false);
11293     else if (IDVal == ".seh_save_regs_w")
11294       parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/true);
11295     else if (IDVal == ".seh_save_sp")
11296       parseDirectiveSEHSaveSP(DirectiveID.getLoc());
11297     else if (IDVal == ".seh_save_fregs")
11298       parseDirectiveSEHSaveFRegs(DirectiveID.getLoc());
11299     else if (IDVal == ".seh_save_lr")
11300       parseDirectiveSEHSaveLR(DirectiveID.getLoc());
11301     else if (IDVal == ".seh_endprologue")
11302       parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/false);
11303     else if (IDVal == ".seh_endprologue_fragment")
11304       parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/true);
11305     else if (IDVal == ".seh_nop")
11306       parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/false);
11307     else if (IDVal == ".seh_nop_w")
11308       parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/true);
11309     else if (IDVal == ".seh_startepilogue")
11310       parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/false);
11311     else if (IDVal == ".seh_startepilogue_cond")
11312       parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/true);
11313     else if (IDVal == ".seh_endepilogue")
11314       parseDirectiveSEHEpilogEnd(DirectiveID.getLoc());
11315     else if (IDVal == ".seh_custom")
11316       parseDirectiveSEHCustom(DirectiveID.getLoc());
11317     else
11318       return true;
11319   } else
11320     return true;
11321   return false;
11322 }
11323 
11324 /// parseLiteralValues
11325 ///  ::= .hword expression [, expression]*
11326 ///  ::= .short expression [, expression]*
11327 ///  ::= .word expression [, expression]*
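/// For example, ".word 0x12345678" emits a 4-byte value and ".short 0x1234"
/// (or ".hword 0x1234") emits a 2-byte value.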
11328 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11329   auto parseOne = [&]() -> bool {
11330     const MCExpr *Value;
11331     if (getParser().parseExpression(Value))
11332       return true;
11333     getParser().getStreamer().emitValue(Value, Size, L);
11334     return false;
11335   };
11336   return (parseMany(parseOne));
11337 }
11338 
11339 /// parseDirectiveThumb
11340 ///  ::= .thumb
11341 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11342   if (parseEOL() || check(!hasThumb(), L, "target does not support Thumb mode"))
11343     return true;
11344 
11345   if (!isThumb())
11346     SwitchMode();
11347 
11348   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11349   getParser().getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
11350   return false;
11351 }
11352 
11353 /// parseDirectiveARM
11354 ///  ::= .arm
11355 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11356   if (parseEOL() || check(!hasARM(), L, "target does not support ARM mode"))
11357     return true;
11358 
11359   if (isThumb())
11360     SwitchMode();
11361   getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11362   getParser().getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
11363   return false;
11364 }
11365 
11366 void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
11367   // We need to flush the current implicit IT block on a label, because it is
11368   // not legal to branch into an IT block.
11369   flushPendingInstructions(getStreamer());
11370 }
11371 
11372 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11373   if (NextSymbolIsThumb) {
11374     getParser().getStreamer().emitThumbFunc(Symbol);
11375     NextSymbolIsThumb = false;
11376   }
11377 }
11378 
11379 /// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
11381 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11382   MCAsmParser &Parser = getParser();
11383   const auto Format = getContext().getObjectFileType();
11384   bool IsMachO = Format == MCContext::IsMachO;
11385 
  // Darwin asm has an optional function name after the .thumb_func directive;
  // ELF doesn't.
11388 
11389   if (IsMachO) {
11390     if (Parser.getTok().is(AsmToken::Identifier) ||
11391         Parser.getTok().is(AsmToken::String)) {
11392       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11393           Parser.getTok().getIdentifier());
11394       getParser().getStreamer().emitThumbFunc(Func);
11395       Parser.Lex();
11396       if (parseEOL())
11397         return true;
11398       return false;
11399     }
11400   }
11401 
11402   if (parseEOL())
11403     return true;
11404 
11405   // .thumb_func implies .thumb
11406   if (!isThumb())
11407     SwitchMode();
11408 
11409   getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11410 
11411   NextSymbolIsThumb = true;
11412   return false;
11413 }
11414 
11415 /// parseDirectiveSyntax
11416 ///  ::= .syntax unified | divided
11417 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11418   MCAsmParser &Parser = getParser();
11419   const AsmToken &Tok = Parser.getTok();
11420   if (Tok.isNot(AsmToken::Identifier)) {
11421     Error(L, "unexpected token in .syntax directive");
11422     return false;
11423   }
11424 
11425   StringRef Mode = Tok.getString();
11426   Parser.Lex();
11427   if (check(Mode == "divided" || Mode == "DIVIDED", L,
11428             "'.syntax divided' arm assembly not supported") ||
11429       check(Mode != "unified" && Mode != "UNIFIED", L,
11430             "unrecognized syntax mode in .syntax directive") ||
11431       parseEOL())
11432     return true;
11433 
11434   // TODO tell the MC streamer the mode
11435   // getParser().getStreamer().Emit???();
11436   return false;
11437 }
11438 
11439 /// parseDirectiveCode
11440 ///  ::= .code 16 | 32
11441 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11442   MCAsmParser &Parser = getParser();
11443   const AsmToken &Tok = Parser.getTok();
11444   if (Tok.isNot(AsmToken::Integer))
11445     return Error(L, "unexpected token in .code directive");
11446   int64_t Val = Parser.getTok().getIntVal();
11447   if (Val != 16 && Val != 32) {
11448     Error(L, "invalid operand to .code directive");
11449     return false;
11450   }
11451   Parser.Lex();
11452 
11453   if (parseEOL())
11454     return true;
11455 
11456   if (Val == 16) {
11457     if (!hasThumb())
11458       return Error(L, "target does not support Thumb mode");
11459 
11460     if (!isThumb())
11461       SwitchMode();
11462     getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11463   } else {
11464     if (!hasARM())
11465       return Error(L, "target does not support ARM mode");
11466 
11467     if (isThumb())
11468       SwitchMode();
11469     getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11470   }
11471 
11472   return false;
11473 }
11474 
11475 /// parseDirectiveReq
11476 ///  ::= name .req registername
11477 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11478   MCAsmParser &Parser = getParser();
11479   Parser.Lex(); // Eat the '.req' token.
11480   MCRegister Reg;
11481   SMLoc SRegLoc, ERegLoc;
11482   if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11483             "register name expected") ||
11484       parseEOL())
11485     return true;
11486 
11487   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
11488     return Error(SRegLoc,
11489                  "redefinition of '" + Name + "' does not match original.");
11490 
11491   return false;
11492 }
11493 
/// parseDirectiveUnreq
11495 ///  ::= .unreq registername
11496 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11497   MCAsmParser &Parser = getParser();
11498   if (Parser.getTok().isNot(AsmToken::Identifier))
11499     return Error(L, "unexpected input in .unreq directive.");
11500   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11501   Parser.Lex(); // Eat the identifier.
11502   return parseEOL();
11503 }
11504 
11505 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
11506 // before, if supported by the new target, or emit mapping symbols for the mode
11507 // switch.
11508 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
11509   if (WasThumb != isThumb()) {
11510     if (WasThumb && hasThumb()) {
11511       // Stay in Thumb mode
11512       SwitchMode();
11513     } else if (!WasThumb && hasARM()) {
11514       // Stay in ARM mode
11515       SwitchMode();
11516     } else {
11517       // Mode switch forced, because the new arch doesn't support the old mode.
11518       getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
11519                                                             : MCAF_Code32);
      // Warn about the implicit mode switch. GAS does not switch modes here,
11521       // but instead stays in the old mode, reporting an error on any following
11522       // instructions as the mode does not exist on the target.
11523       Warning(Loc, Twine("new target does not support ") +
11524                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
11525                        (!WasThumb ? "thumb" : "arm") + " mode");
11526     }
11527   }
11528 }
11529 
11530 /// parseDirectiveArch
11531 ///  ::= .arch token
11532 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11533   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11534   ARM::ArchKind ID = ARM::parseArch(Arch);
11535 
11536   if (ID == ARM::ArchKind::INVALID)
11537     return Error(L, "Unknown arch name");
11538 
11539   bool WasThumb = isThumb();
11540   Triple T;
11541   MCSubtargetInfo &STI = copySTI();
11542   STI.setDefaultFeatures("", /*TuneCPU*/ "",
11543                          ("+" + ARM::getArchName(ID)).str());
11544   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11545   FixModeAfterArchChange(WasThumb, L);
11546 
11547   getTargetStreamer().emitArch(ID);
11548   return false;
11549 }
11550 
11551 /// parseDirectiveEabiAttr
11552 ///  ::= .eabi_attribute int, int [, "str"]
11553 ///  ::= .eabi_attribute Tag_name, int [, "str"]
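/// For example, '.eabi_attribute 32, 1, "aeabi"' (Tag_compatibility) uses the
/// combined integer-plus-string form handled below; most other tags take
/// either an integer or a string value.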
11554 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
11555   MCAsmParser &Parser = getParser();
11556   int64_t Tag;
11557   SMLoc TagLoc;
11558   TagLoc = Parser.getTok().getLoc();
11559   if (Parser.getTok().is(AsmToken::Identifier)) {
11560     StringRef Name = Parser.getTok().getIdentifier();
11561     std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
11562         Name, ARMBuildAttrs::getARMAttributeTags());
11563     if (!Ret) {
11564       Error(TagLoc, "attribute name not recognised: " + Name);
11565       return false;
11566     }
11567     Tag = *Ret;
11568     Parser.Lex();
11569   } else {
11570     const MCExpr *AttrExpr;
11571 
11572     TagLoc = Parser.getTok().getLoc();
11573     if (Parser.parseExpression(AttrExpr))
11574       return true;
11575 
11576     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
11577     if (check(!CE, TagLoc, "expected numeric constant"))
11578       return true;
11579 
11580     Tag = CE->getValue();
11581   }
11582 
11583   if (Parser.parseComma())
11584     return true;
11585 
11586   StringRef StringValue = "";
11587   bool IsStringValue = false;
11588 
11589   int64_t IntegerValue = 0;
11590   bool IsIntegerValue = false;
11591 
11592   if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
11593     IsStringValue = true;
11594   else if (Tag == ARMBuildAttrs::compatibility) {
11595     IsStringValue = true;
11596     IsIntegerValue = true;
11597   } else if (Tag < 32 || Tag % 2 == 0)
11598     IsIntegerValue = true;
11599   else if (Tag % 2 == 1)
11600     IsStringValue = true;
11601   else
11602     llvm_unreachable("invalid tag type");
11603 
11604   if (IsIntegerValue) {
11605     const MCExpr *ValueExpr;
11606     SMLoc ValueExprLoc = Parser.getTok().getLoc();
11607     if (Parser.parseExpression(ValueExpr))
11608       return true;
11609 
11610     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
11611     if (!CE)
11612       return Error(ValueExprLoc, "expected numeric constant");
11613     IntegerValue = CE->getValue();
11614   }
11615 
11616   if (Tag == ARMBuildAttrs::compatibility) {
11617     if (Parser.parseComma())
11618       return true;
11619   }
11620 
11621   std::string EscapedValue;
11622   if (IsStringValue) {
11623     if (Parser.getTok().isNot(AsmToken::String))
11624       return Error(Parser.getTok().getLoc(), "bad string constant");
11625 
11626     if (Tag == ARMBuildAttrs::also_compatible_with) {
11627       if (Parser.parseEscapedString(EscapedValue))
11628         return Error(Parser.getTok().getLoc(), "bad escaped string constant");
11629 
11630       StringValue = EscapedValue;
11631     } else {
11632       StringValue = Parser.getTok().getStringContents();
11633       Parser.Lex();
11634     }
11635   }
11636 
11637   if (Parser.parseEOL())
11638     return true;
11639 
11640   if (IsIntegerValue && IsStringValue) {
11641     assert(Tag == ARMBuildAttrs::compatibility);
11642     getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
11643   } else if (IsIntegerValue)
11644     getTargetStreamer().emitAttribute(Tag, IntegerValue);
11645   else if (IsStringValue)
11646     getTargetStreamer().emitTextAttribute(Tag, StringValue);
11647   return false;
11648 }
11649 
11650 /// parseDirectiveCPU
11651 ///  ::= .cpu str
11652 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11653   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11654   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
11655 
11656   // FIXME: This is using table-gen data, but should be moved to
11657   // ARMTargetParser once that is table-gen'd.
11658   if (!getSTI().isCPUStringValid(CPU))
11659     return Error(L, "Unknown CPU name");
11660 
11661   bool WasThumb = isThumb();
11662   MCSubtargetInfo &STI = copySTI();
11663   STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
11664   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11665   FixModeAfterArchChange(WasThumb, L);
11666 
11667   return false;
11668 }
11669 
11670 /// parseDirectiveFPU
11671 ///  ::= .fpu str
11672 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11673   SMLoc FPUNameLoc = getTok().getLoc();
11674   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11675 
11676   ARM::FPUKind ID = ARM::parseFPU(FPU);
11677   std::vector<StringRef> Features;
11678   if (!ARM::getFPUFeatures(ID, Features))
11679     return Error(FPUNameLoc, "Unknown FPU name");
11680 
11681   MCSubtargetInfo &STI = copySTI();
11682   for (auto Feature : Features)
11683     STI.ApplyFeatureFlag(Feature);
11684   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11685 
11686   getTargetStreamer().emitFPU(ID);
11687   return false;
11688 }
11689 
11690 /// parseDirectiveFnStart
11691 ///  ::= .fnstart
11692 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11693   if (parseEOL())
11694     return true;
11695 
11696   if (UC.hasFnStart()) {
11697     Error(L, ".fnstart starts before the end of previous one");
11698     UC.emitFnStartLocNotes();
11699     return true;
11700   }
11701 
11702   // Reset the unwind directives parser state
11703   UC.reset();
11704 
11705   getTargetStreamer().emitFnStart();
11706 
11707   UC.recordFnStart(L);
11708   return false;
11709 }
11710 
11711 /// parseDirectiveFnEnd
11712 ///  ::= .fnend
11713 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
11714   if (parseEOL())
11715     return true;
11716   // Check the ordering of unwind directives
11717   if (!UC.hasFnStart())
11718     return Error(L, ".fnstart must precede .fnend directive");
11719 
11720   // Reset the unwind directives parser state
11721   getTargetStreamer().emitFnEnd();
11722 
11723   UC.reset();
11724   return false;
11725 }
11726 
11727 /// parseDirectiveCantUnwind
11728 ///  ::= .cantunwind
11729 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
11730   if (parseEOL())
11731     return true;
11732 
11733   UC.recordCantUnwind(L);
11734   // Check the ordering of unwind directives
11735   if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
11736     return true;
11737 
11738   if (UC.hasHandlerData()) {
11739     Error(L, ".cantunwind can't be used with .handlerdata directive");
11740     UC.emitHandlerDataLocNotes();
11741     return true;
11742   }
11743   if (UC.hasPersonality()) {
11744     Error(L, ".cantunwind can't be used with .personality directive");
11745     UC.emitPersonalityLocNotes();
11746     return true;
11747   }
11748 
11749   getTargetStreamer().emitCantUnwind();
11750   return false;
11751 }
11752 
11753 /// parseDirectivePersonality
11754 ///  ::= .personality name
11755 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
11756   MCAsmParser &Parser = getParser();
11757   bool HasExistingPersonality = UC.hasPersonality();
11758 
11759   // Parse the name of the personality routine
11760   if (Parser.getTok().isNot(AsmToken::Identifier))
11761     return Error(L, "unexpected input in .personality directive.");
11762   StringRef Name(Parser.getTok().getIdentifier());
11763   Parser.Lex();
11764 
11765   if (parseEOL())
11766     return true;
11767 
11768   UC.recordPersonality(L);
11769 
11770   // Check the ordering of unwind directives
11771   if (!UC.hasFnStart())
11772     return Error(L, ".fnstart must precede .personality directive");
11773   if (UC.cantUnwind()) {
11774     Error(L, ".personality can't be used with .cantunwind directive");
11775     UC.emitCantUnwindLocNotes();
11776     return true;
11777   }
11778   if (UC.hasHandlerData()) {
11779     Error(L, ".personality must precede .handlerdata directive");
11780     UC.emitHandlerDataLocNotes();
11781     return true;
11782   }
11783   if (HasExistingPersonality) {
11784     Error(L, "multiple personality directives");
11785     UC.emitPersonalityLocNotes();
11786     return true;
11787   }
11788 
11789   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
11790   getTargetStreamer().emitPersonality(PR);
11791   return false;
11792 }
11793 
11794 /// parseDirectiveHandlerData
11795 ///  ::= .handlerdata
11796 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
11797   if (parseEOL())
11798     return true;
11799 
11800   UC.recordHandlerData(L);
11801   // Check the ordering of unwind directives
11802   if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .handlerdata directive");
11804   if (UC.cantUnwind()) {
11805     Error(L, ".handlerdata can't be used with .cantunwind directive");
11806     UC.emitCantUnwindLocNotes();
11807     return true;
11808   }
11809 
11810   getTargetStreamer().emitHandlerData();
11811   return false;
11812 }
11813 
11814 /// parseDirectiveSetFP
11815 ///  ::= .setfp fpreg, spreg [, offset]
11816 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
11817   MCAsmParser &Parser = getParser();
11818   // Check the ordering of unwind directives
11819   if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
11820       check(UC.hasHandlerData(), L,
11821             ".setfp must precede .handlerdata directive"))
11822     return true;
11823 
11824   // Parse fpreg
11825   SMLoc FPRegLoc = Parser.getTok().getLoc();
11826   int FPReg = tryParseRegister();
11827 
11828   if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
11829       Parser.parseComma())
11830     return true;
11831 
11832   // Parse spreg
11833   SMLoc SPRegLoc = Parser.getTok().getLoc();
11834   int SPReg = tryParseRegister();
11835   if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
11836       check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
11837             "register should be either $sp or the latest fp register"))
11838     return true;
11839 
11840   // Update the frame pointer register
11841   UC.saveFPReg(FPReg);
11842 
11843   // Parse offset
11844   int64_t Offset = 0;
11845   if (Parser.parseOptionalToken(AsmToken::Comma)) {
11846     if (Parser.getTok().isNot(AsmToken::Hash) &&
11847         Parser.getTok().isNot(AsmToken::Dollar))
11848       return Error(Parser.getTok().getLoc(), "'#' expected");
11849     Parser.Lex(); // skip hash token.
11850 
11851     const MCExpr *OffsetExpr;
11852     SMLoc ExLoc = Parser.getTok().getLoc();
11853     SMLoc EndLoc;
11854     if (getParser().parseExpression(OffsetExpr, EndLoc))
11855       return Error(ExLoc, "malformed setfp offset");
11856     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11857     if (check(!CE, ExLoc, "setfp offset must be an immediate"))
11858       return true;
11859     Offset = CE->getValue();
11860   }
11861 
11862   if (Parser.parseEOL())
11863     return true;
11864 
11865   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
11866                                 static_cast<unsigned>(SPReg), Offset);
11867   return false;
11868 }
11869 
/// parseDirectivePad
11871 ///  ::= .pad offset
11872 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
11873   MCAsmParser &Parser = getParser();
11874   // Check the ordering of unwind directives
11875   if (!UC.hasFnStart())
11876     return Error(L, ".fnstart must precede .pad directive");
11877   if (UC.hasHandlerData())
11878     return Error(L, ".pad must precede .handlerdata directive");
11879 
11880   // Parse the offset
11881   if (Parser.getTok().isNot(AsmToken::Hash) &&
11882       Parser.getTok().isNot(AsmToken::Dollar))
11883     return Error(Parser.getTok().getLoc(), "'#' expected");
11884   Parser.Lex(); // skip hash token.
11885 
11886   const MCExpr *OffsetExpr;
11887   SMLoc ExLoc = Parser.getTok().getLoc();
11888   SMLoc EndLoc;
11889   if (getParser().parseExpression(OffsetExpr, EndLoc))
11890     return Error(ExLoc, "malformed pad offset");
11891   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11892   if (!CE)
11893     return Error(ExLoc, "pad offset must be an immediate");
11894 
11895   if (parseEOL())
11896     return true;
11897 
11898   getTargetStreamer().emitPad(CE->getValue());
11899   return false;
11900 }
11901 
11902 /// parseDirectiveRegSave
11903 ///  ::= .save  { registers }
11904 ///  ::= .vsave { registers }
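/// For example, ".save {r4-r11, lr}" records saved GPRs and
/// ".vsave {d8-d15}" records saved VFP d-registers.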
11905 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
11906   // Check the ordering of unwind directives
11907   if (!UC.hasFnStart())
11908     return Error(L, ".fnstart must precede .save or .vsave directives");
11909   if (UC.hasHandlerData())
11910     return Error(L, ".save or .vsave must precede .handlerdata directive");
11911 
11912   // RAII object to make sure parsed operands are deleted.
11913   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
11914 
11915   // Parse the register list
11916   if (parseRegisterList(Operands, true, true) || parseEOL())
11917     return true;
11918   ARMOperand &Op = (ARMOperand &)*Operands[0];
11919   if (!IsVector && !Op.isRegList())
11920     return Error(L, ".save expects GPR registers");
11921   if (IsVector && !Op.isDPRRegList())
11922     return Error(L, ".vsave expects DPR registers");
11923 
11924   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
11925   return false;
11926 }
11927 
11928 /// parseDirectiveInst
11929 ///  ::= .inst opcode [, ...]
11930 ///  ::= .inst.n opcode [, ...]
11931 ///  ::= .inst.w opcode [, ...]
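/// For example, ".inst.n 0xbf00" emits a 16-bit Thumb NOP and
/// ".inst.w 0xf3af8000" a 32-bit one; a plain ".inst" in Thumb mode guesses
/// the width from the opcode value.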
11932 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
11933   int Width = 4;
11934 
11935   if (isThumb()) {
11936     switch (Suffix) {
11937     case 'n':
11938       Width = 2;
11939       break;
11940     case 'w':
11941       break;
11942     default:
11943       Width = 0;
11944       break;
11945     }
11946   } else {
11947     if (Suffix)
11948       return Error(Loc, "width suffixes are invalid in ARM mode");
11949   }
11950 
11951   auto parseOne = [&]() -> bool {
11952     const MCExpr *Expr;
11953     if (getParser().parseExpression(Expr))
11954       return true;
11955     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
11956     if (!Value) {
11957       return Error(Loc, "expected constant expression");
11958     }
11959 
11960     char CurSuffix = Suffix;
11961     switch (Width) {
11962     case 2:
11963       if (Value->getValue() > 0xffff)
11964         return Error(Loc, "inst.n operand is too big, use inst.w instead");
11965       break;
11966     case 4:
11967       if (Value->getValue() > 0xffffffff)
11968         return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
11969                               " operand is too big");
11970       break;
11971     case 0:
11972       // Thumb mode, no width indicated. Guess from the opcode, if possible.
11973       if (Value->getValue() < 0xe800)
11974         CurSuffix = 'n';
11975       else if (Value->getValue() >= 0xe8000000)
11976         CurSuffix = 'w';
11977       else
11978         return Error(Loc, "cannot determine Thumb instruction size, "
11979                           "use inst.n/inst.w instead");
11980       break;
11981     default:
11982       llvm_unreachable("only supported widths are 2 and 4");
11983     }
11984 
11985     getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
11986     return false;
11987   };
11988 
11989   if (parseOptionalToken(AsmToken::EndOfStatement))
11990     return Error(Loc, "expected expression following directive");
11991   if (parseMany(parseOne))
11992     return true;
11993   return false;
11994 }
11995 
11996 /// parseDirectiveLtorg
11997 ///  ::= .ltorg | .pool
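/// Both spellings dump the literal pool (e.g. constants materialised via the
/// "ldr rN, =value" pseudo-instruction) at the current location.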
11998 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
11999   if (parseEOL())
12000     return true;
12001   getTargetStreamer().emitCurrentConstantPool();
12002   return false;
12003 }
12004 
12005 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
12006   const MCSection *Section = getStreamer().getCurrentSectionOnly();
12007 
12008   if (parseEOL())
12009     return true;
12010 
12011   if (!Section) {
12012     getStreamer().initSections(false, getSTI());
12013     Section = getStreamer().getCurrentSectionOnly();
12014   }
12015 
12016   assert(Section && "must have section to emit alignment");
12017   if (Section->useCodeAlign())
12018     getStreamer().emitCodeAlignment(Align(2), &getSTI());
12019   else
12020     getStreamer().emitValueToAlignment(Align(2));
12021 
12022   return false;
12023 }
12024 
12025 /// parseDirectivePersonalityIndex
12026 ///   ::= .personalityindex index
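///   For example, ".personalityindex 0" selects the compact EHABI personality
///   routine __aeabi_unwind_cpp_pr0.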
12027 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
12028   MCAsmParser &Parser = getParser();
12029   bool HasExistingPersonality = UC.hasPersonality();
12030 
12031   const MCExpr *IndexExpression;
12032   SMLoc IndexLoc = Parser.getTok().getLoc();
12033   if (Parser.parseExpression(IndexExpression) || parseEOL()) {
12034     return true;
12035   }
12036 
12037   UC.recordPersonalityIndex(L);
12038 
12039   if (!UC.hasFnStart()) {
12040     return Error(L, ".fnstart must precede .personalityindex directive");
12041   }
12042   if (UC.cantUnwind()) {
12043     Error(L, ".personalityindex cannot be used with .cantunwind");
12044     UC.emitCantUnwindLocNotes();
12045     return true;
12046   }
12047   if (UC.hasHandlerData()) {
12048     Error(L, ".personalityindex must precede .handlerdata directive");
12049     UC.emitHandlerDataLocNotes();
12050     return true;
12051   }
12052   if (HasExistingPersonality) {
12053     Error(L, "multiple personality directives");
12054     UC.emitPersonalityLocNotes();
12055     return true;
12056   }
12057 
12058   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
12059   if (!CE)
12060     return Error(IndexLoc, "index must be a constant number");
12061   if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
12062     return Error(IndexLoc,
12063                  "personality routine index should be in range [0-3]");
12064 
12065   getTargetStreamer().emitPersonalityIndex(CE->getValue());
12066   return false;
12067 }
12068 
12069 /// parseDirectiveUnwindRaw
12070 ///   ::= .unwind_raw offset, opcode [, opcode...]
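///   Illustrative use: ".unwind_raw 4, 0xb1, 0x01" emits raw EHABI opcode
///   bytes 0xb1 0x01 (a pop of r0), which adjust sp by the stated 4 bytes.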
12071 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
12072   MCAsmParser &Parser = getParser();
12073   int64_t StackOffset;
12074   const MCExpr *OffsetExpr;
12075   SMLoc OffsetLoc = getLexer().getLoc();
12076 
12077   if (!UC.hasFnStart())
12078     return Error(L, ".fnstart must precede .unwind_raw directives");
12079   if (getParser().parseExpression(OffsetExpr))
12080     return Error(OffsetLoc, "expected expression");
12081 
12082   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12083   if (!CE)
12084     return Error(OffsetLoc, "offset must be a constant");
12085 
12086   StackOffset = CE->getValue();
12087 
12088   if (Parser.parseComma())
12089     return true;
12090 
12091   SmallVector<uint8_t, 16> Opcodes;
12092 
12093   auto parseOne = [&]() -> bool {
12094     const MCExpr *OE = nullptr;
12095     SMLoc OpcodeLoc = getLexer().getLoc();
12096     if (check(getLexer().is(AsmToken::EndOfStatement) ||
12097                   Parser.parseExpression(OE),
12098               OpcodeLoc, "expected opcode expression"))
12099       return true;
12100     const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
12101     if (!OC)
12102       return Error(OpcodeLoc, "opcode value must be a constant");
12103     const int64_t Opcode = OC->getValue();
12104     if (Opcode & ~0xff)
12105       return Error(OpcodeLoc, "invalid opcode");
12106     Opcodes.push_back(uint8_t(Opcode));
12107     return false;
12108   };
12109 
12110   // Must have at least 1 element
12111   SMLoc OpcodeLoc = getLexer().getLoc();
12112   if (parseOptionalToken(AsmToken::EndOfStatement))
12113     return Error(OpcodeLoc, "expected opcode expression");
12114   if (parseMany(parseOne))
12115     return true;
12116 
12117   getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
12118   return false;
12119 }
12120 
12121 /// parseDirectiveTLSDescSeq
12122 ///   ::= .tlsdescseq tls-variable
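///   For example, ".tlsdescseq myvar" (with "myvar" standing in for the
///   thread-local symbol) annotates the following TLS descriptor sequence.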
12123 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12124   MCAsmParser &Parser = getParser();
12125 
12126   if (getLexer().isNot(AsmToken::Identifier))
12127     return TokError("expected variable after '.tlsdescseq' directive");
12128 
12129   const MCSymbolRefExpr *SRE =
12130     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
12131                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
12132   Lex();
12133 
12134   if (parseEOL())
12135     return true;
12136 
12137   getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12138   return false;
12139 }
12140 
12141 /// parseDirectiveMovSP
12142 ///  ::= .movsp reg [, #offset]
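///  For example, ".movsp r7" records that the stack pointer has been copied
///  into r7, so subsequent unwind information is expressed relative to r7.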
12143 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
12144   MCAsmParser &Parser = getParser();
12145   if (!UC.hasFnStart())
12146     return Error(L, ".fnstart must precede .movsp directives");
12147   if (UC.getFPReg() != ARM::SP)
12148     return Error(L, "unexpected .movsp directive");
12149 
12150   SMLoc SPRegLoc = Parser.getTok().getLoc();
12151   int SPReg = tryParseRegister();
12152   if (SPReg == -1)
12153     return Error(SPRegLoc, "register expected");
12154   if (SPReg == ARM::SP || SPReg == ARM::PC)
12155     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
12156 
12157   int64_t Offset = 0;
12158   if (Parser.parseOptionalToken(AsmToken::Comma)) {
12159     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
12160       return true;
12161 
12162     const MCExpr *OffsetExpr;
12163     SMLoc OffsetLoc = Parser.getTok().getLoc();
12164 
12165     if (Parser.parseExpression(OffsetExpr))
12166       return Error(OffsetLoc, "malformed offset expression");
12167 
12168     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12169     if (!CE)
12170       return Error(OffsetLoc, "offset must be an immediate constant");
12171 
12172     Offset = CE->getValue();
12173   }
12174 
12175   if (parseEOL())
12176     return true;
12177 
12178   getTargetStreamer().emitMovSP(SPReg, Offset);
12179   UC.saveFPReg(SPReg);
12180 
12181   return false;
12182 }
12183 
12184 /// parseDirectiveObjectArch
12185 ///   ::= .object_arch name
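///   For example, ".object_arch armv4" overrides the architecture recorded in
///   the object's build attributes without changing what the assembler
///   accepts.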
12186 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12187   MCAsmParser &Parser = getParser();
12188   if (getLexer().isNot(AsmToken::Identifier))
12189     return Error(getLexer().getLoc(), "unexpected token");
12190 
12191   StringRef Arch = Parser.getTok().getString();
12192   SMLoc ArchLoc = Parser.getTok().getLoc();
12193   Lex();
12194 
12195   ARM::ArchKind ID = ARM::parseArch(Arch);
12196 
12197   if (ID == ARM::ArchKind::INVALID)
12198     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
12199   if (parseToken(AsmToken::EndOfStatement))
12200     return true;
12201 
12202   getTargetStreamer().emitObjectArch(ID);
12203   return false;
12204 }
12205 
12206 /// parseDirectiveAlign
12207 ///   ::= .align
12208 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
  // NOTE: if this is not the end of the statement, fall back to the generic,
  // target-agnostic handling for this directive, which handles that case
  // correctly.
  if (parseOptionalToken(AsmToken::EndOfStatement)) {
    // A bare '.align' is handled target-specifically to mean 2**2 (4 byte)
    // alignment.
12213     const MCSection *Section = getStreamer().getCurrentSectionOnly();
12214     assert(Section && "must have section to emit alignment");
12215     if (Section->useCodeAlign())
12216       getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
12217     else
12218       getStreamer().emitValueToAlignment(Align(4), 0, 1, 0);
12219     return false;
12220   }
12221   return true;
12222 }
12223 
12224 /// parseDirectiveThumbSet
12225 ///  ::= .thumb_set name, value
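///  For example, ".thumb_set alias, func" behaves like ".set alias, func" but
///  additionally marks "alias" as a Thumb function symbol.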
12226 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
12227   MCAsmParser &Parser = getParser();
12228 
12229   StringRef Name;
12230   if (check(Parser.parseIdentifier(Name),
12231             "expected identifier after '.thumb_set'") ||
12232       Parser.parseComma())
12233     return true;
12234 
12235   MCSymbol *Sym;
12236   const MCExpr *Value;
12237   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
12238                                                Parser, Sym, Value))
12239     return true;
12240 
12241   getTargetStreamer().emitThumbSet(Sym, Value);
12242   return false;
12243 }
12244 
12245 /// parseDirectiveSEHAllocStack
12246 /// ::= .seh_stackalloc
12247 /// ::= .seh_stackalloc_w
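/// For example, ".seh_stackalloc 16" records a 16-byte stack allocation in
/// the Windows on ARM unwind information for the current prologue.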
12248 bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12249   int64_t Size;
12250   if (parseImmExpr(Size))
12251     return true;
12252   getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12253   return false;
12254 }
12255 
12256 /// parseDirectiveSEHSaveRegs
12257 /// ::= .seh_save_regs
12258 /// ::= .seh_save_regs_w
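/// For example, ".seh_save_regs {r4-r7, lr}" records the pushed GPRs; the _w
/// form is required once any of r8-r12 is included (see the check below).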
12259 bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
12260   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12261 
12262   if (parseRegisterList(Operands) || parseEOL())
12263     return true;
12264   ARMOperand &Op = (ARMOperand &)*Operands[0];
12265   if (!Op.isRegList())
12266     return Error(L, ".seh_save_regs{_w} expects GPR registers");
12267   const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12268   uint32_t Mask = 0;
  for (unsigned R : RegList) {
    unsigned Reg = MRI->getEncodingValue(R);
12271     if (Reg == 15) // pc -> lr
12272       Reg = 14;
12273     if (Reg == 13)
12274       return Error(L, ".seh_save_regs{_w} can't include SP");
12275     assert(Reg < 16U && "Register out of range");
12276     unsigned Bit = (1u << Reg);
12277     Mask |= Bit;
12278   }
12279   if (!Wide && (Mask & 0x1f00) != 0)
12280     return Error(L,
12281                  ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12282   getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12283   return false;
12284 }
12285 
12286 /// parseDirectiveSEHSaveSP
12287 /// ::= .seh_save_sp
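/// For example, ".seh_save_sp r7" records that sp was copied into r7, which
/// then serves as the frame pointer for unwinding.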
12288 bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12289   int Reg = tryParseRegister();
12290   if (Reg == -1 || !MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12291     return Error(L, "expected GPR");
12292   unsigned Index = MRI->getEncodingValue(Reg);
12293   if (Index > 14 || Index == 13)
12294     return Error(L, "invalid register for .seh_save_sp");
12295   getTargetStreamer().emitARMWinCFISaveSP(Index);
12296   return false;
12297 }
12298 
12299 /// parseDirectiveSEHSaveFRegs
12300 /// ::= .seh_save_fregs
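/// For example, ".seh_save_fregs {d8-d11}" records a contiguous run of saved
/// D registers; the run must lie entirely within d0-d15 or d16-d31.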
12301 bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
12302   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
12303 
12304   if (parseRegisterList(Operands) || parseEOL())
12305     return true;
12306   ARMOperand &Op = (ARMOperand &)*Operands[0];
12307   if (!Op.isDPRRegList())
12308     return Error(L, ".seh_save_fregs expects DPR registers");
12309   const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12310   uint32_t Mask = 0;
  for (unsigned R : RegList) {
    unsigned Reg = MRI->getEncodingValue(R);
12313     assert(Reg < 32U && "Register out of range");
12314     unsigned Bit = (1u << Reg);
12315     Mask |= Bit;
12316   }
12317 
12318   if (Mask == 0)
12319     return Error(L, ".seh_save_fregs missing registers");
12320 
12321   unsigned First = 0;
12322   while ((Mask & 1) == 0) {
12323     First++;
12324     Mask >>= 1;
12325   }
12326   if (((Mask + 1) & Mask) != 0)
12327     return Error(L,
12328                  ".seh_save_fregs must take a contiguous range of registers");
12329   unsigned Last = First;
12330   while ((Mask & 2) != 0) {
12331     Last++;
12332     Mask >>= 1;
12333   }
12334   if (First < 16 && Last >= 16)
12335     return Error(L, ".seh_save_fregs must be all d0-d15 or d16-d31");
12336   getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
12337   return false;
12338 }
12339 
12340 /// parseDirectiveSEHSaveLR
12341 /// ::= .seh_save_lr
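/// Illustrative use: ".seh_save_lr 4" records the stack offset at which lr
/// was stored, so the unwinder can recover the return address.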
12342 bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12343   int64_t Offset;
12344   if (parseImmExpr(Offset))
12345     return true;
12346   getTargetStreamer().emitARMWinCFISaveLR(Offset);
12347   return false;
12348 }
12349 
12350 /// parseDirectiveSEHPrologEnd
12351 /// ::= .seh_endprologue
12352 /// ::= .seh_endprologue_fragment
12353 bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
12354   getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12355   return false;
12356 }
12357 
12358 /// parseDirectiveSEHNop
12359 /// ::= .seh_nop
12360 /// ::= .seh_nop_w
12361 bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
12362   getTargetStreamer().emitARMWinCFINop(Wide);
12363   return false;
12364 }
12365 
12366 /// parseDirectiveSEHEpilogStart
12367 /// ::= .seh_startepilogue
12368 /// ::= .seh_startepilogue_cond
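/// For example, ".seh_startepilogue_cond ne" opens the description of an
/// epilogue that only runs when the NE condition holds.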
12369 bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
12370   unsigned CC = ARMCC::AL;
12371   if (Condition) {
12372     MCAsmParser &Parser = getParser();
12373     SMLoc S = Parser.getTok().getLoc();
12374     const AsmToken &Tok = Parser.getTok();
12375     if (!Tok.is(AsmToken::Identifier))
12376       return Error(S, ".seh_startepilogue_cond missing condition");
12377     CC = ARMCondCodeFromString(Tok.getString());
12378     if (CC == ~0U)
12379       return Error(S, "invalid condition");
12380     Parser.Lex(); // Eat the token.
12381   }
12382 
12383   getTargetStreamer().emitARMWinCFIEpilogStart(CC);
12384   return false;
12385 }
12386 
12387 /// parseDirectiveSEHEpilogEnd
12388 /// ::= .seh_endepilogue
12389 bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
12390   getTargetStreamer().emitARMWinCFIEpilogEnd();
12391   return false;
12392 }
12393 
12394 /// parseDirectiveSEHCustom
12395 /// ::= .seh_custom
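/// Illustrative use: ".seh_custom 0xef, 0x01" (placeholder bytes) emits the
/// listed bytes verbatim as a single custom unwind opcode.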
12396 bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12397   unsigned Opcode = 0;
12398   do {
12399     int64_t Byte;
12400     if (parseImmExpr(Byte))
12401       return true;
12402     if (Byte > 0xff || Byte < 0)
12403       return Error(L, "Invalid byte value in .seh_custom");
12404     if (Opcode > 0x00ffffff)
12405       return Error(L, "Too many bytes in .seh_custom");
    // Store the bytes as one big-endian number in Opcode. In a multi-byte
    // opcode sequence, the first byte can't be zero.
12408     Opcode = (Opcode << 8) | Byte;
12409   } while (parseOptionalToken(AsmToken::Comma));
12410   getTargetStreamer().emitARMWinCFICustom(Opcode);
12411   return false;
12412 }
12413 
12414 /// Force static initialization.
12415 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser() {
12416   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
12417   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
12418   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
12419   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
12420 }
12421 
12422 #define GET_REGISTER_MATCHER
12423 #define GET_SUBTARGET_FEATURE_NAME
12424 #define GET_MATCHER_IMPLEMENTATION
12425 #define GET_MNEMONIC_SPELL_CHECKER
12426 #include "ARMGenAsmMatcher.inc"
12427 
12428 // Some diagnostics need to vary with subtarget features, so they are handled
12429 // here. For example, the DPR class has either 16 or 32 registers, depending
12430 // on the FPU available.
12431 const char *
12432 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12433   switch (MatchError) {
12434   // rGPR contains sp starting with ARMv8.
12435   case Match_rGPR:
12436     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12437                       : "operand must be a register in range [r0, r12] or r14";
12438   // DPR contains 16 registers for some FPUs, and 32 for others.
12439   case Match_DPR:
12440     return hasD32() ? "operand must be a register in range [d0, d31]"
12441                     : "operand must be a register in range [d0, d15]";
12442   case Match_DPR_RegList:
12443     return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12444                     : "operand must be a list of registers in range [d0, d15]";
12445 
12446   // For all other diags, use the static string from tablegen.
12447   default:
12448     return getMatchKindDiag(MatchError);
12449   }
12450 }
12451 
12452 // Process the list of near-misses, throwing away ones we don't want to report
12453 // to the user, and converting the rest to a source location and string that
12454 // should be reported.
12455 void
12456 ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
12457                                SmallVectorImpl<NearMissMessage> &NearMissesOut,
12458                                SMLoc IDLoc, OperandVector &Operands) {
12459   // TODO: If operand didn't match, sub in a dummy one and run target
12460   // predicate, so that we can avoid reporting near-misses that are invalid?
  // TODO: Many operand types don't have SuperClasses set, so we report
  // redundant ones.
12463   // TODO: Some operands are superclasses of registers (e.g.
12464   // MCK_RegShiftedImm), we don't have any way to represent that currently.
12465   // TODO: This is not all ARM-specific, can some of it be factored out?
12466 
12467   // Record some information about near-misses that we have already seen, so
12468   // that we can avoid reporting redundant ones. For example, if there are
12469   // variants of an instruction that take 8- and 16-bit immediates, we want
12470   // to only report the widest one.
12471   std::multimap<unsigned, unsigned> OperandMissesSeen;
12472   SmallSet<FeatureBitset, 4> FeatureMissesSeen;
12473   bool ReportedTooFewOperands = false;
12474 
12475   // Process the near-misses in reverse order, so that we see more general ones
12476   // first, and so can avoid emitting more specific ones.
12477   for (NearMissInfo &I : reverse(NearMissesIn)) {
12478     switch (I.getKind()) {
12479     case NearMissInfo::NearMissOperand: {
12480       SMLoc OperandLoc =
12481           ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
12482       const char *OperandDiag =
12483           getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
12484 
      // If we have already emitted a message for a superclass, don't also
      // report the sub-class. We consider all operand classes that we don't
      // have a specialised diagnostic for to be equal for the purpose of this
      // check, so that we don't report the generic error multiple times on
      // the same operand.
12490       unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
12491       auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
12492       if (std::any_of(PrevReports.first, PrevReports.second,
12493                       [DupCheckMatchClass](
12494                           const std::pair<unsigned, unsigned> Pair) {
12495             if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12496               return Pair.second == DupCheckMatchClass;
12497             else
12498               return isSubclass((MatchClassKind)DupCheckMatchClass,
12499                                 (MatchClassKind)Pair.second);
12500           }))
12501         break;
12502       OperandMissesSeen.insert(
12503           std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
12504 
12505       NearMissMessage Message;
12506       Message.Loc = OperandLoc;
12507       if (OperandDiag) {
12508         Message.Message = OperandDiag;
12509       } else if (I.getOperandClass() == InvalidMatchClass) {
12510         Message.Message = "too many operands for instruction";
12511       } else {
12512         Message.Message = "invalid operand for instruction";
12513         LLVM_DEBUG(
12514             dbgs() << "Missing diagnostic string for operand class "
12515                    << getMatchClassName((MatchClassKind)I.getOperandClass())
12516                    << I.getOperandClass() << ", error " << I.getOperandError()
12517                    << ", opcode " << MII.getName(I.getOpcode()) << "\n");
12518       }
12519       NearMissesOut.emplace_back(Message);
12520       break;
12521     }
12522     case NearMissInfo::NearMissFeature: {
12523       const FeatureBitset &MissingFeatures = I.getFeatures();
12524       // Don't report the same set of features twice.
12525       if (FeatureMissesSeen.count(MissingFeatures))
12526         break;
12527       FeatureMissesSeen.insert(MissingFeatures);
12528 
12529       // Special case: don't report a feature set which includes arm-mode for
12530       // targets that don't have ARM mode.
12531       if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
12532         break;
12533       // Don't report any near-misses that both require switching instruction
12534       // set, and adding other subtarget features.
12535       if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
12536           MissingFeatures.count() > 1)
12537         break;
12538       if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
12539           MissingFeatures.count() > 1)
12540         break;
12541       if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
12542           (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
12543                                              Feature_IsThumbBit})).any())
12544         break;
12545       if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
12546         break;
12547 
12548       NearMissMessage Message;
12549       Message.Loc = IDLoc;
12550       raw_svector_ostream OS(Message.Message);
12551 
12552       OS << "instruction requires:";
12553       for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
12554         if (MissingFeatures.test(i))
12555           OS << ' ' << getSubtargetFeatureName(i);
12556 
12557       NearMissesOut.emplace_back(Message);
12558 
12559       break;
12560     }
12561     case NearMissInfo::NearMissPredicate: {
12562       NearMissMessage Message;
12563       Message.Loc = IDLoc;
12564       switch (I.getPredicateError()) {
12565       case Match_RequiresNotITBlock:
12566         Message.Message = "flag setting instruction only valid outside IT block";
12567         break;
12568       case Match_RequiresITBlock:
12569         Message.Message = "instruction only valid inside IT block";
12570         break;
12571       case Match_RequiresV6:
12572         Message.Message = "instruction variant requires ARMv6 or later";
12573         break;
12574       case Match_RequiresThumb2:
12575         Message.Message = "instruction variant requires Thumb2";
12576         break;
12577       case Match_RequiresV8:
12578         Message.Message = "instruction variant requires ARMv8 or later";
12579         break;
12580       case Match_RequiresFlagSetting:
12581         Message.Message = "no flag-preserving variant of this instruction available";
12582         break;
12583       case Match_InvalidOperand:
12584         Message.Message = "invalid operand for instruction";
12585         break;
12586       default:
12587         llvm_unreachable("Unhandled target predicate error");
12588         break;
12589       }
12590       NearMissesOut.emplace_back(Message);
12591       break;
12592     }
12593     case NearMissInfo::NearMissTooFewOperands: {
12594       if (!ReportedTooFewOperands) {
12595         SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
12596         NearMissesOut.emplace_back(NearMissMessage{
12597             EndLoc, StringRef("too few operands for instruction")});
12598         ReportedTooFewOperands = true;
12599       }
12600       break;
12601     }
12602     case NearMissInfo::NoNearMiss:
12603       // This should never leave the matcher.
12604       llvm_unreachable("not a near-miss");
12605       break;
12606     }
12607   }
12608 }
12609 
12610 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12611                                     SMLoc IDLoc, OperandVector &Operands) {
12612   SmallVector<NearMissMessage, 4> Messages;
12613   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12614 
12615   if (Messages.size() == 0) {
12616     // No near-misses were found, so the best we can do is "invalid
12617     // instruction".
12618     Error(IDLoc, "invalid instruction");
12619   } else if (Messages.size() == 1) {
12620     // One near miss was found, report it as the sole error.
12621     Error(Messages[0].Loc, Messages[0].Message);
12622   } else {
12623     // More than one near miss, so report a generic "invalid instruction"
12624     // error, followed by notes for each of the near-misses.
12625     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
12626     for (auto &M : Messages) {
12627       Note(M.Loc, M.Message);
12628     }
12629   }
12630 }
12631 
12632 bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
12633   // FIXME: This structure should be moved inside ARMTargetParser
12634   // when we start to table-generate them, and we can use the ARM
12635   // flags below, that were generated by table-gen.
12636   static const struct {
12637     const uint64_t Kind;
12638     const FeatureBitset ArchCheck;
12639     const FeatureBitset Features;
12640   } Extensions[] = {
12641       {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12642       {ARM::AEK_AES,
12643        {Feature_HasV8Bit},
12644        {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12645       {ARM::AEK_SHA2,
12646        {Feature_HasV8Bit},
12647        {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12648       {ARM::AEK_CRYPTO,
12649        {Feature_HasV8Bit},
12650        {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12651       {ARM::AEK_FP,
12652        {Feature_HasV8Bit},
12653        {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12654       {(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
12655        {Feature_HasV7Bit, Feature_IsNotMClassBit},
12656        {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12657       {ARM::AEK_MP,
12658        {Feature_HasV7Bit, Feature_IsNotMClassBit},
12659        {ARM::FeatureMP}},
12660       {ARM::AEK_SIMD,
12661        {Feature_HasV8Bit},
12662        {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12663       {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12664       // FIXME: Only available in A-class, isel not predicated
12665       {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12666       {ARM::AEK_FP16,
12667        {Feature_HasV8_2aBit},
12668        {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12669       {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12670       {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12671       {ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12672       // FIXME: Unsupported extensions.
12673       {ARM::AEK_OS, {}, {}},
12674       {ARM::AEK_IWMMXT, {}, {}},
12675       {ARM::AEK_IWMMXT2, {}, {}},
12676       {ARM::AEK_MAVERICK, {}, {}},
12677       {ARM::AEK_XSCALE, {}, {}},
12678   };
12679   bool EnableFeature = true;
12680   if (Name.starts_with_insensitive("no")) {
12681     EnableFeature = false;
12682     Name = Name.substr(2);
12683   }
12684   uint64_t FeatureKind = ARM::parseArchExt(Name);
12685   if (FeatureKind == ARM::AEK_INVALID)
12686     return Error(ExtLoc, "unknown architectural extension: " + Name);
12687 
12688   for (const auto &Extension : Extensions) {
12689     if (Extension.Kind != FeatureKind)
12690       continue;
12691 
12692     if (Extension.Features.none())
12693       return Error(ExtLoc, "unsupported architectural extension: " + Name);
12694 
12695     if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
12696       return Error(ExtLoc, "architectural extension '" + Name +
12697                                "' is not "
12698                                "allowed for the current base architecture");
12699 
12700     MCSubtargetInfo &STI = copySTI();
12701     if (EnableFeature) {
12702       STI.SetFeatureBitsTransitively(Extension.Features);
12703     } else {
12704       STI.ClearFeatureBitsTransitively(Extension.Features);
12705     }
12706     FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
12707     setAvailableFeatures(Features);
12708     return true;
12709   }
12710   return false;
12711 }
12712 
12713 /// parseDirectiveArchExtension
12714 ///   ::= .arch_extension [no]feature
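///   For example, ".arch_extension crc" enables the CRC instructions, and
///   ".arch_extension nocrc" turns them back off.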
12715 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
12716 
12717   MCAsmParser &Parser = getParser();
12718 
12719   if (getLexer().isNot(AsmToken::Identifier))
12720     return Error(getLexer().getLoc(), "expected architecture extension name");
12721 
12722   StringRef Name = Parser.getTok().getString();
12723   SMLoc ExtLoc = Parser.getTok().getLoc();
12724   Lex();
12725 
12726   if (parseEOL())
12727     return true;
12728 
12729   if (Name == "nocrypto") {
12730     enableArchExtFeature("nosha2", ExtLoc);
12731     enableArchExtFeature("noaes", ExtLoc);
12732   }
12733 
12734   if (enableArchExtFeature(Name, ExtLoc))
12735     return false;
12736 
12737   return Error(ExtLoc, "unknown architectural extension: " + Name);
12738 }
12739 
12740 // Define this matcher function after the auto-generated include so we
12741 // have the match class enum definitions.
12742 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
12743                                                   unsigned Kind) {
12744   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
12745   // If the kind is a token for a literal immediate, check if our asm
12746   // operand matches. This is for InstAliases which have a fixed-value
12747   // immediate in the syntax.
12748   switch (Kind) {
12749   default: break;
12750   case MCK__HASH_0:
12751     if (Op.isImm())
12752       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12753         if (CE->getValue() == 0)
12754           return Match_Success;
12755     break;
12756   case MCK__HASH_8:
12757     if (Op.isImm())
12758       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12759         if (CE->getValue() == 8)
12760           return Match_Success;
12761     break;
12762   case MCK__HASH_16:
12763     if (Op.isImm())
12764       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12765         if (CE->getValue() == 16)
12766           return Match_Success;
12767     break;
12768   case MCK_ModImm:
12769     if (Op.isImm()) {
12770       const MCExpr *SOExpr = Op.getImm();
12771       int64_t Value;
12772       if (!SOExpr->evaluateAsAbsolute(Value))
12773         return Match_Success;
12774       assert((Value >= std::numeric_limits<int32_t>::min() &&
12775               Value <= std::numeric_limits<uint32_t>::max()) &&
12776              "expression value must be representable in 32 bits");
12777     }
12778     break;
12779   case MCK_rGPR:
12780     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
12781       return Match_Success;
12782     return Match_rGPR;
12783   case MCK_GPRPair:
12784     if (Op.isReg() &&
12785         MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
12786       return Match_Success;
12787     break;
12788   }
12789   return Match_InvalidOperand;
12790 }
12791 
12792 bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
12793                                            StringRef ExtraToken) {
12794   if (!hasMVE())
12795     return false;
12796 
12797   if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
12798       (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") ||
12799       (Mnemonic.startswith("vmov") &&
12800        !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
12801          ExtraToken == ".8")) ||
12802       (Mnemonic.startswith("vrint") && Mnemonic != "vrintr") ||
12803       (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi"))
12804     return true;
12805 
12806   const char *predicable_prefixes[] = {
12807       "vabav",      "vabd",     "vabs",      "vadc",       "vadd",
12808       "vaddlv",     "vaddv",    "vand",      "vbic",       "vbrsr",
12809       "vcadd",      "vcls",     "vclz",      "vcmla",      "vcmp",
12810       "vcmul",      "vctp",     "vcvt",      "vddup",      "vdup",
12811       "vdwdup",     "veor",     "vfma",      "vfmas",      "vfms",
12812       "vhadd",      "vhcadd",   "vhsub",     "vidup",      "viwdup",
12813       "vldrb",      "vldrd",    "vldrw",     "vmax",       "vmaxa",
12814       "vmaxav",     "vmaxnm",   "vmaxnma",   "vmaxnmav",   "vmaxnmv",
12815       "vmaxv",      "vmin",     "vminav",    "vminnm",     "vminnmav",
12816       "vminnmv",    "vminv",    "vmla",      "vmladav",    "vmlaldav",
12817       "vmlalv",     "vmlas",    "vmlav",     "vmlsdav",    "vmlsldav",
12818       "vmovlb",     "vmovlt",   "vmovnb",    "vmovnt",     "vmul",
12819       "vmvn",       "vneg",     "vorn",      "vorr",       "vpnot",
12820       "vpsel",      "vqabs",    "vqadd",     "vqdmladh",   "vqdmlah",
12821       "vqdmlash",   "vqdmlsdh", "vqdmulh",   "vqdmull",    "vqmovn",
12822       "vqmovun",    "vqneg",    "vqrdmladh", "vqrdmlah",   "vqrdmlash",
12823       "vqrdmlsdh",  "vqrdmulh", "vqrshl",    "vqrshrn",    "vqrshrun",
12824       "vqshl",      "vqshrn",   "vqshrun",   "vqsub",      "vrev16",
12825       "vrev32",     "vrev64",   "vrhadd",    "vrmlaldavh", "vrmlalvh",
12826       "vrmlsldavh", "vrmulh",   "vrshl",     "vrshr",      "vrshrn",
12827       "vsbc",       "vshl",     "vshlc",     "vshll",      "vshr",
12828       "vshrn",      "vsli",     "vsri",      "vstrb",      "vstrd",
12829       "vstrw",      "vsub"};
12830 
  return llvm::any_of(predicable_prefixes, [&Mnemonic](const char *Prefix) {
    return Mnemonic.starts_with(Prefix);
  });
12834 }
12835