//===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H

#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>

namespace llvm {

/// X86Operand - Instances of this class represent a single parsed operand of
/// an X86 machine instruction.
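///
/// Illustrative sketch (not taken verbatim from the parser; Operands, Start,
/// End and ValExpr are hypothetical caller-side names): the asm parser builds
/// one X86Operand per parsed operand via the Create* factory methods below,
/// e.g.
/// \code
///   Operands.push_back(X86Operand::CreateReg(X86::EAX, Start, End));
///   Operands.push_back(X86Operand::CreateImm(ValExpr, Start, End));
/// \endcode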
struct X86Operand final : public MCParsedAsmOperand {
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  SMLoc StartLoc, EndLoc;
  SMLoc OffsetOfLoc; // Location of the "offset" operator, if present.
  StringRef SymName; // Symbol name, used by inline asm handling.
  void *OpDecl;      // Opaque front-end declaration, used by inline asm handling.
  bool AddressOf;    // True if the operand requires an address-of (inline asm).

  /// This is used for inline asm: the memory operand may already specify a
  /// base and index register (e.g. ARR[eax + ecx*4]), so no extra register
  /// can be used for the MemOp.
  bool UseUpRegs = false;

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNo;
  };

  struct PrefOp {
    unsigned Prefixes;
  };

  struct ImmOp {
    const MCExpr *Val;
    bool LocalRef; // Set when the immediate names a local (non-global)
                   // reference; see isOffsetOfLocal().
  };

  struct MemOp {
    unsigned SegReg;         // Segment override register (0 if none).
    const MCExpr *Disp;      // Displacement expression.
    unsigned BaseReg;        // Base register (0 if none).
    unsigned DefaultBaseReg; // Base register to encode when BaseReg is 0.
    unsigned IndexReg;       // Index register (0 if none).
    unsigned Scale;          // Index scale factor: 1, 2, 4, or 8.
    unsigned Size;           // Operand size in bits (0 if unsized).
    unsigned ModeSize;       // Address size of the parsing mode: 16, 32, or 64.

    /// If the memory operand is unsized and there are multiple instruction
    /// matches, prefer the one with this size.
    unsigned FrontendSize;

    /// If false, then this operand must be a memory operand for an indirect
    /// branch instruction. Otherwise, this operand may belong to either a
    /// direct or indirect branch instruction.
    bool MaybeDirectBranchDest;
  };

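  // Exactly one member of this union is active at a time, as selected by
  // Kind; the accessors below assert the expected kind before reading it.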
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct ImmOp Imm;
    struct MemOp Mem;
    struct PrefOp Pref;
  };

  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), OpDecl(nullptr),
        AddressOf(false) {}

  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }

  void print(raw_ostream &OS) const override {
    auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
      if (Val->getKind() == MCExpr::Constant) {
        if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
          OS << VName << Imm;
      } else if (Val->getKind() == MCExpr::SymbolRef) {
        if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
          const MCSymbol &Sym = SRE->getSymbol();
          if (const char *SymNameStr = Sym.getName().data())
            OS << VName << SymNameStr;
        }
      }
    };

    switch (Kind) {
    case Token:
      OS << Tok.Data;
      break;
    case Register:
      OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
      break;
    case DXRegister:
      OS << "DXReg";
      break;
    case Immediate:
      PrintImmValue(Imm.Val, "Imm:");
      break;
    case Prefix:
      OS << "Prefix:" << Pref.Prefixes;
      break;
    case Memory:
      OS << "Memory: ModeSize=" << Mem.ModeSize;
      if (Mem.Size)
        OS << ",Size=" << Mem.Size;
      if (Mem.BaseReg)
        OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
      if (Mem.IndexReg)
        OS << ",IndexReg="
           << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
      if (Mem.Scale)
        OS << ",Scale=" << Mem.Scale;
      if (Mem.Disp)
        PrintImmValue(Mem.Disp, ",Disp=");
      if (Mem.SegReg)
        OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
      break;
    }
  }

  StringRef getToken() const {
    assert(Kind == Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }
  void setTokenValue(StringRef Value) {
    assert(Kind == Token && "Invalid access!");
    Tok.Data = Value.data();
    Tok.Length = Value.size();
  }

  unsigned getReg() const override {
    assert(Kind == Register && "Invalid access!");
    return Reg.RegNo;
  }

  unsigned getPrefix() const {
    assert(Kind == Prefix && "Invalid access!");
    return Pref.Prefixes;
  }

  const MCExpr *getImm() const {
    assert(Kind == Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getMemDisp() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Disp;
  }
  unsigned getMemSegReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.SegReg;
  }
  unsigned getMemBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.BaseReg;
  }
  unsigned getMemDefaultBaseReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.DefaultBaseReg;
  }
  unsigned getMemIndexReg() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg;
  }
  unsigned getMemScale() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.Scale;
  }
  unsigned getMemModeSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.ModeSize;
  }
  unsigned getMemFrontendSize() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.FrontendSize;
  }
  bool isMaybeDirectBranchDest() const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.MaybeDirectBranchDest;
  }

  bool isToken() const override { return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }

  bool isImmSExti16i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti16i8Value(CE->getValue());
  }
  bool isImmSExti32i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti32i8Value(CE->getValue());
  }
  bool isImmSExti64i8() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i8Value(CE->getValue());
  }
  bool isImmSExti64i32() const {
    if (!isImm())
      return false;

    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return true;

    // Otherwise, check the value is in a range that makes sense for this
    // extension.
    return isImmSExti64i32Value(CE->getValue());
  }

  bool isImmUnsignedi4() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, reject it. The immediate byte is shared
    // with a register encoding. We can't have it affected by a relocation.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    return isImmUnsignedi4Value(CE->getValue());
  }

  bool isImmUnsignedi8() const {
    if (!isImm()) return false;
    // If this isn't a constant expr, just assume it fits and let relaxation
    // handle it.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return true;
    return isImmUnsignedi8Value(CE->getValue());
  }

  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
  bool isMemUnsized() const {
    return Kind == Memory && Mem.Size == 0;
  }
  bool isMem8() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 8);
  }
  bool isMem16() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 16);
  }
  bool isMem32() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 32);
  }
  bool isMem64() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 64);
  }
  bool isMem80() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 80);
  }
  bool isMem128() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 128);
  }
  bool isMem256() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 256);
  }
  bool isMem512() const {
    return Kind == Memory && (!Mem.Size || Mem.Size == 512);
  }

  bool isSibMem() const {
    return isMem() && Mem.BaseReg != X86::RIP && Mem.BaseReg != X86::EIP;
  }

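  // Returns true if the index register's encoding falls in the contiguous
  // range [LowR, HighR] of a vector register class (e.g. XMM0..XMM15).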
  bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
    assert(Kind == Memory && "Invalid access!");
    return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
  }

  bool isMem64_RC128() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC128() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem128_RC256() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }
  bool isMem256_RC128() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
  }
  bool isMem256_RC256() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
  }

  bool isMem64_RC128X() const {
    return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC128X() const {
    return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem128_RC256X() const {
    return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC128X() const {
    return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
  }
  bool isMem256_RC256X() const {
    return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem256_RC512() const {
    return isMem256() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }
  bool isMem512_RC256X() const {
    return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
  }
  bool isMem512_RC512() const {
    return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
  }

  bool isAbsMem() const {
    return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
           !getMemIndexReg() && getMemScale() == 1 && isMaybeDirectBranchDest();
  }

  bool isAVX512RC() const { return isImm(); }

  bool isAbsMem16() const {
    return isAbsMem() && Mem.ModeSize == 16;
  }

  bool isMemUseUpRegs() const override { return UseUpRegs; }

  bool isSrcIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
       getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isSrcIdx8() const {
    return isMem8() && isSrcIdx();
  }
  bool isSrcIdx16() const {
    return isMem16() && isSrcIdx();
  }
  bool isSrcIdx32() const {
    return isMem32() && isSrcIdx();
  }
  bool isSrcIdx64() const {
    return isMem64() && isSrcIdx();
  }

  bool isDstIdx() const {
    return !getMemIndexReg() && getMemScale() == 1 &&
      (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
      (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
       getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
      cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
  }
  bool isDstIdx8() const {
    return isMem8() && isDstIdx();
  }
  bool isDstIdx16() const {
    return isMem16() && isDstIdx();
  }
  bool isDstIdx32() const {
    return isMem32() && isDstIdx();
  }
  bool isDstIdx64() const {
    return isMem64() && isDstIdx();
  }

  bool isMemOffs() const {
    return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
      getMemScale() == 1;
  }

  bool isMemOffs16_8() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs16_16() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs16_32() const {
    return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_8() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs32_16() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs32_32() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs32_64() const {
    return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
  }
  bool isMemOffs64_8() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
  }
  bool isMemOffs64_16() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
  }
  bool isMemOffs64_32() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
  }
  bool isMemOffs64_64() const {
    return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  }

  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }

  bool isGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isGR16orGR32orGR64() const {
    return Kind == Register &&
      (X86MCRegisterClasses[X86::GR16RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
       X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
  }

  bool isVectorReg() const {
    return Kind == Register &&
           (X86MCRegisterClasses[X86::VR64RegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR128XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR256XRegClassID].contains(getReg()) ||
            X86MCRegisterClasses[X86::VR512RegClassID].contains(getReg()));
  }

  bool isVK1Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
  }

  bool isVK2Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
  }

  bool isVK4Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
  }

  bool isVK8Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
  }

  bool isVK16Pair() const {
    return Kind == Register &&
      X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 32);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addGR16orGR32orGR64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    MCRegister RegNo = getReg();
    if (X86MCRegisterClasses[X86::GR32RegClassID].contains(RegNo) ||
        X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
      RegNo = getX86SubSuperRegister(RegNo, 16);
    Inst.addOperand(MCOperand::createReg(RegNo));
  }

  void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

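  // Rewrites a single mask register to the overlapping mask-pair register
  // that contains it (e.g. K2 or K3 becomes K2_K3) before adding the operand.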
  void addMaskPairOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Reg = getReg();
    switch (Reg) {
    case X86::K0:
    case X86::K1:
      Reg = X86::K0_K1;
      break;
    case X86::K2:
    case X86::K3:
      Reg = X86::K2_K3;
      break;
    case X86::K4:
    case X86::K5:
      Reg = X86::K4_K5;
      break;
    case X86::K6:
    case X86::K7:
      Reg = X86::K6_K7;
      break;
    }
    Inst.addOperand(MCOperand::createReg(Reg));
  }

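  // Appends the standard 5-operand memory reference in the order: base (or
  // default base), scale, index, displacement, segment. Illustrative sketch:
  // a parsed `dword ptr [ebx + esi*4 + 16]` with no segment override is
  // emitted as EBX, imm 4, ESI, imm 16, reg 0.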
  void addMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 5) && "Invalid number of operands!");
    if (getMemBaseReg())
      Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    else
      Inst.addOperand(MCOperand::createReg(getMemDefaultBaseReg()));
    Inst.addOperand(MCOperand::createImm(getMemScale()));
    Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
    addExpr(Inst, getMemDisp());
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addAbsMemOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
  }

  void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  void addDstIdxOperands(MCInst &Inst, unsigned N) const {
    assert((N == 1) && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
  }

  void addMemOffsOperands(MCInst &Inst, unsigned N) const {
    assert((N == 2) && "Invalid number of operands!");
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(getMemDisp()));
    Inst.addOperand(MCOperand::createReg(getMemSegReg()));
  }

  static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
    SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
    auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
            bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
            StringRef SymName = StringRef(), void *OpDecl = nullptr) {
    auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
    Res->Reg.RegNo = RegNo;
    Res->AddressOf = AddressOf;
    Res->OffsetOfLoc = OffsetOfLoc;
    Res->SymName = SymName;
    Res->OpDecl = OpDecl;
    return Res;
  }

  static std::unique_ptr<X86Operand>
  CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
    return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
  }

  static std::unique_ptr<X86Operand>
  CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
    auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
    Res->Pref.Prefixes = Prefixes;
    return Res;
  }

  static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
                                               SMLoc StartLoc, SMLoc EndLoc,
                                               StringRef SymName = StringRef(),
                                               void *OpDecl = nullptr,
                                               bool GlobalRef = true) {
    auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
    Res->Imm.Val      = Val;
    Res->Imm.LocalRef = !GlobalRef;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = true;
    return Res;
  }

  /// Create an absolute memory operand.
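  ///
  /// Illustrative sketch (DispExpr, Start, and End are hypothetical
  /// caller-side names): a 32-bit-mode operand that is only a displacement,
  /// e.g. Intel `[0x1000]`, could be built as
  /// \code
  ///   X86Operand::CreateMem(/*ModeSize=*/32, DispExpr, Start, End);
  /// \endcode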
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
            unsigned Size = 0, StringRef SymName = StringRef(),
            void *OpDecl = nullptr, unsigned FrontendSize = 0,
            bool UseUpRegs = false, bool MaybeDirectBranchDest = true) {
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = 0;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = 0;
    Res->Mem.DefaultBaseReg = 0;
    Res->Mem.IndexReg = 0;
    Res->Mem.Scale    = 1;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }

  /// Create a generalized memory operand.
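  ///
  /// Illustrative sketch (DispExpr, Start, and End are hypothetical
  /// caller-side names): a 32-bit-mode operand for Intel
  /// `[ebx + esi*4 + 16]` could be built as
  /// \code
  ///   X86Operand::CreateMem(/*ModeSize=*/32, /*SegReg=*/0, DispExpr,
  ///                         X86::EBX, X86::ESI, /*Scale=*/4, Start, End);
  /// \endcode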
  static std::unique_ptr<X86Operand>
  CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
            unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
            SMLoc EndLoc, unsigned Size = 0,
            unsigned DefaultBaseReg = X86::NoRegister,
            StringRef SymName = StringRef(), void *OpDecl = nullptr,
            unsigned FrontendSize = 0, bool UseUpRegs = false,
            bool MaybeDirectBranchDest = true) {
    // We should never have just a displacement; that should be parsed as an
    // absolute memory operand.
    assert((SegReg || BaseReg || IndexReg || DefaultBaseReg) &&
           "Invalid memory operand!");

    // The scale should always be one of {1,2,4,8}.
    assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
           "Invalid scale!");
    auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
    Res->Mem.SegReg   = SegReg;
    Res->Mem.Disp     = Disp;
    Res->Mem.BaseReg  = BaseReg;
    Res->Mem.DefaultBaseReg = DefaultBaseReg;
    Res->Mem.IndexReg = IndexReg;
    Res->Mem.Scale    = Scale;
    Res->Mem.Size     = Size;
    Res->Mem.ModeSize = ModeSize;
    Res->Mem.FrontendSize = FrontendSize;
    Res->Mem.MaybeDirectBranchDest = MaybeDirectBranchDest;
    Res->UseUpRegs = UseUpRegs;
    Res->SymName      = SymName;
    Res->OpDecl       = OpDecl;
    Res->AddressOf    = false;
    return Res;
  }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H