//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86MCTargetDesc.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

namespace {
class X86MCCodeEmitter : public MCCodeEmitter {
  X86MCCodeEmitter(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  const MCInstrInfo &MCII;
  MCContext &Ctx;
public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
    : MCII(mcii), Ctx(ctx) {
  }

  ~X86MCCodeEmitter() {}

  bool is64BitMode(const MCSubtargetInfo &STI) const {
    return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
  }

  bool is32BitMode(const MCSubtargetInfo &STI) const {
    return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
  }

  bool is16BitMode(const MCSubtargetInfo &STI) const {
    return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
  }

  /// Is16BitMemOperand - Return true if the specified instruction has
  /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
  bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
                         const MCSubtargetInfo &STI) const {
    const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
    const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
    const MCOperand &Disp     = MI.getOperand(Op+X86::AddrDisp);

    if (is16BitMode(STI) && BaseReg.getReg() == 0 &&
        Disp.isImm() && Disp.getImm() < 0x10000)
      return true;
    if ((BaseReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
        (IndexReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
      return true;
    return false;
  }

  unsigned GetX86RegNum(const MCOperand &MO) const {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
  }

  // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
  // 0-7 and the difference between the 2 groups is given by the REX prefix.
  // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
  // in 1's complement form, example:
  //
  //  ModRM field => XMM9 => 1
  //  VEX.VVVV    => XMM9 => ~9
  //
  //  See table 4-35 of Intel AVX Programming Reference for details.
  unsigned char getVEXRegisterEncoding(const MCInst &MI,
                                       unsigned OpNum) const {
    unsigned SrcReg = MI.getOperand(OpNum).getReg();
    unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
    if (X86II::isX86_64ExtendedReg(SrcReg))
      SrcRegNum |= 8;

    // The registers represented through VEX_VVVV should
    // be encoded in 1's complement form.
    return (~SrcRegNum) & 0xf;
  }
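
  // Worked example for getVEXRegisterEncoding (a sketch, values assumed from
  // the table above): for XMM9, GetX86RegNum yields 1 and the
  // extended-register bit adds 8, so SrcRegNum = 9 and the returned VEX.vvvv
  // value is (~9) & 0xf = 0b0110 = 6.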

  unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
                                             unsigned OpNum) const {
    assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
           "Invalid mask register as write-mask!");
    unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
    return MaskRegNum;
  }

  void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
    OS << (char)C;
    ++CurByte;
  }

  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                    raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != Size; ++i) {
      EmitByte(Val & 255, CurByte, OS);
      Val >>= 8;
    }
  }

  void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
                     unsigned ImmSize, MCFixupKind FixupKind,
                     unsigned &CurByte, raw_ostream &OS,
                     SmallVectorImpl<MCFixup> &Fixups,
                     int ImmOffset = 0) const;

  inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
                                        unsigned RM) {
    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
    return RM | (RegOpcode << 3) | (Mod << 6);
  }

  void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        unsigned &CurByte, raw_ostream &OS) const {
    EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
  }

  void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   unsigned &CurByte, raw_ostream &OS) const {
    // SIB byte is in the same format as the ModRMByte.
    EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
  }

  void EmitMemModRMByte(const MCInst &MI, unsigned Op,
                        unsigned RegOpcodeField,
                        uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
                        SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const;

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                           const MCInst &MI, const MCInstrDesc &Desc,
                           raw_ostream &OS) const;

  void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
                                 const MCInst &MI, raw_ostream &OS) const;

  void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                        const MCInst &MI, const MCInstrDesc &Desc,
                        const MCSubtargetInfo &STI,
                        raw_ostream &OS) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            const MCRegisterInfo &MRI,
                                            const MCSubtargetInfo &STI,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, Ctx);
}

/// isDisp8 - Return true if this signed displacement fits in an 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  return Value == (signed char)Value;
}

/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
/// compressed displacement field.
static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
  assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
         "Compressed 8-bit displacement is only valid for EVEX inst.");

  unsigned CD8_Scale =
    (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
  if (CD8_Scale == 0) {
    CValue = Value;
    return isDisp8(Value);
  }

  unsigned Mask = CD8_Scale - 1;
  assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.");
  if (Value & Mask) // Unaligned offset
    return false;
  Value /= (int)CD8_Scale;
  bool Ret = (Value == (signed char)Value);

  if (Ret)
    CValue = Value;
  return Ret;
}
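
// Worked example for isCDisp8 (a sketch with assumed values): for an EVEX
// instruction whose memory object size gives CD8_Scale = 32, a displacement
// of 256 is aligned (256 & 31 == 0) and compresses to 256/32 = 8, which fits
// in a signed byte, so CValue = 8. A displacement of 100 fails the alignment
// check and falls back to a 32-bit displacement.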

/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default: llvm_unreachable("Unsupported signed fixup size!");
    case 4: return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}

/// Is32BitMemOperand - Return true if the specified instruction has
/// a 32-bit memory operand. Op specifies the operand # of the memoperand.
static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}

/// Is64BitMemOperand - Return true if the specified instruction has
/// a 64-bit memory operand. Op specifies the operand # of the memoperand.
#ifndef NDEBUG
static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg  = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}
#endif

/// StartsWithGlobalOffsetTable - Check if this expression starts with
/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We only check the simple cases
/// that are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the
/// start of a binary expression.
enum GlobalOffsetTableExprKind {
  GOT_None,
  GOT_Normal,
  GOT_SymDiff
};
static GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = nullptr;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}

static bool HasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr*>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}

void X86MCCodeEmitter::
EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
              MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
              SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 &&
        FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      EmitConstant(DispOp.getImm()+ImmOffset, Size, CurByte, OS);
      return;
    }
    Expr = MCConstantExpr::Create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // If we have an immoffset, add it to the expression.
  if ((FixupKind == FK_Data_4 ||
       FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }

      if (Kind == GOT_Normal)
        ImmOffset = CurByte;
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (HasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr*>(Expr);
      if (HasSecRelSymbolRef(Bin->getLHS())
          || HasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
    ImmOffset -= 4;
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and zeros for the field.
  Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind, Loc));
  EmitConstant(0, Size, CurByte, OS);
}
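
// Example of the pc-relative bias in EmitImmediate (a sketch): the CPU
// resolves a rel32 target relative to the end of the 4-byte field, while the
// fixup is recorded at the start of the field, so subtracting 4 from
// ImmOffset makes a fixup over "E8 <rel32>" resolve to
// target - (fixup address + 4).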

void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
                                        unsigned RegOpcodeField,
                                        uint64_t TSFlags, unsigned &CurByte,
                                        raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &Disp     = MI.getOperand(Op+X86::AddrDisp);
  const MCOperand &Base     = MI.getOperand(Op+X86::AddrBaseReg);
  const MCOperand &Scale    = MI.getOperand(Op+X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP) {    // [disp32+RIP] in X86-64 mode
    assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
    EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);

    unsigned FixupKind = X86::reloc_riprel_4byte;

    // movq loads are handled with a special relocation form which allows the
    // linker to eliminate some loads for GOT references which end up in the
    // same linkage unit.
    if (MI.getOpcode() == X86::MOV64rm)
      FixupKind = X86::reloc_riprel_4byte_movq_load;

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the immediate field of the instruction with
    // the size of the immediate field.  If we have this case, add it into the
    // expression to emit.
    int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;

    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
                  CurByte, OS, Fixups, -ImmSize);
    return;
  }

  unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (Is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And GetX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo, to the row values from
      // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
      static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isDisp8(Disp.getImm())) {
        if (Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
          // There is no displacement; just the register.
          EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
        EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
    } else {
      // There is no BaseReg; this is the plain [disp16] case.
      EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
    return;
  }
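
  // Worked example for the 16-bit forms above (a sketch): for [BP+DI+4],
  // SDM Table 2-1 gives R/M = 3 (BP+DI); the displacement fits in a byte,
  // so we emit ModRM = (1 << 6) | (RegOpcodeField << 3) | 3 followed by the
  // disp8 byte 04. Plain [BX] is R/M = 7 with Mod = 0 and no displacement.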

  // Determine whether a SIB byte is needed.
  // If no BaseReg, issue a RIP relative instruction only if the MCE can
  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
  // 2-7) and absolute references.

  if (// The SIB byte must be used if there is an index register.
      IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // present.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
      (!is64BitMode(STI) || BaseReg != 0)) {

    if (BaseReg == 0) {          // [disp32]     in X86-32 mode
      EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
      EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
      return;
    }

    // If the base is not EBP/ESP and there is no displacement, use simple
    // indirect register encoding, this handles addresses like [EAX].  The
    // encoding for [EBP] with no displacement means [disp32] so we handle it
    // by emitting a displacement of 0 below.
    if (Disp.isImm() && Disp.getImm() == 0 && BaseRegNo != N86::EBP) {
      EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
      return;
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
    if (Disp.isImm()) {
      if (!HasEVEX && isDisp8(Disp.getImm())) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // Try EVEX compressed 8-bit displacement first; if failed, fall back to
      // 32-bit displacement.
      int CDisp8 = 0;
      if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                      CDisp8 - Disp.getImm());
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
    EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
    return;
  }

  // We need a SIB byte, so start by outputting the ModR/M byte first
  assert(IndexReg.getReg() != X86::ESP &&
         IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8  = false;
  int CDisp8 = 0;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (!Disp.isImm()) {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (Disp.getImm() == 0 &&
             // Base reg can't be anything that ends up with '5' as the base
             // reg, it is the magic [*] nomenclature that indicates no base.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
  } else if (!HasEVEX && isDisp8(Disp.getImm())) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
  } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true;           // Make sure to force 8 bit disp if Base=EBP
    ImmOffset = CDisp8 - Disp.getImm();
  } else {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
  unsigned SS = SSTable[Scale.getImm()];

  if (BaseReg == 0) {
    // Handle the SIB byte for the case where there is no base, see Intel
    // Manual 2A, table 2-7. The displacement has already been output.
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
      IndexRegNo = 4;
    EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
  } else {
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else
      IndexRegNo = 4;   // For example [ESP+1*<noreg>+4]
    EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
  }

  // Do we need to output a displacement?
  if (ForceDisp8)
    EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                  ImmOffset);
  else if (ForceDisp32 || Disp.getImm() != 0)
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
}
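
// Worked example for EmitMemModRMByte (a sketch): for [ESP + EBX*4 + 8], the
// ESP base forces a SIB byte. With a disp8 the ModR/M byte is
// (1 << 6) | (RegOpcodeField << 3) | 4, the SIB byte is
// (SS=2 << 6) | (EBX=3 << 3) | (ESP=4) = 0x9C, and the displacement byte 08
// follows.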

/// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
/// called VEX.
void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                           int MemOperand, const MCInst &MI,
                                           const MCInstrDesc &Desc,
                                           raw_ostream &OS) const {
  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");

  uint64_t Encoding = TSFlags & X86II::EncodingMask;
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
  bool HasMemOp4 = TSFlags & X86II::MemOp4;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64 bit mode only)
  //
  unsigned char VEX_R = 0x1;
  unsigned char EVEX_R2 = 0x1;

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)
  unsigned char VEX_X = 0x1;

  // VEX_B:
  //
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)
  //
  unsigned char VEX_B = 0x1;

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)
  unsigned char VEX_W = 0;

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100-0b11111: Reserved for future use
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword
  unsigned char VEX_5M = 0;

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.
  unsigned char VEX_4V = 0xf;
  unsigned char EVEX_V2 = 0x1;

  // VEX_L (Vector Length):
  //
  //  0: scalar or 128-bit vector
  //  1: 256-bit vector
  //
  unsigned char VEX_L = 0;
  unsigned char EVEX_L2 = 0;

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2
  //
  unsigned char VEX_PP = 0;

  // EVEX_U
  unsigned char EVEX_U = 1; // Always '1' so far

  // EVEX_z
  unsigned char EVEX_z = 0;

  // EVEX_b
  unsigned char EVEX_b = 0;

  // EVEX_rc
  unsigned char EVEX_rc = 0;

  // EVEX_aaa
  unsigned char EVEX_aaa = 0;

  bool EncodeRC = false;

  if (TSFlags & X86II::VEX_W)
    VEX_W = 1;

  if (TSFlags & X86II::VEX_L)
    VEX_L = 1;
  if (TSFlags & X86II::EVEX_L2)
    EVEX_L2 = 1;

  if (HasEVEX_K && (TSFlags & X86II::EVEX_Z))
    EVEX_z = 1;

  if ((TSFlags & X86II::EVEX_B))
    EVEX_b = 1;

  switch (TSFlags & X86II::OpPrefixMask) {
  default: break;  // VEX_PP already correct
  case X86II::PD: VEX_PP = 0x1; break; // 66
  case X86II::XS: VEX_PP = 0x2; break; // F3
  case X86II::XD: VEX_PP = 0x3; break; // F2
  }

  switch (TSFlags & X86II::OpMapMask) {
  default: llvm_unreachable("Invalid prefix!");
  case X86II::TB:   VEX_5M = 0x1; break; // 0F
  case X86II::T8:   VEX_5M = 0x2; break; // 0F 38
  case X86II::TA:   VEX_5M = 0x3; break; // 0F 3A
  case X86II::XOP8: VEX_5M = 0x8; break;
  case X86II::XOP9: VEX_5M = 0x9; break;
  case X86II::XOPA: VEX_5M = 0xA; break;
  }

  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  switch (TSFlags & X86II::FormMask) {
  default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
  case X86II::RawFrm:
    break;
  case X86II::MRMDestMem: {
    // MRMDestMem instruction forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
                                                 X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
                                                 X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;
    if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
                                             X86::AddrIndexReg).getReg()))
      EVEX_V2 = 0x0;

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    const MCOperand &MO = MI.getOperand(CurOp);
    if (MO.isReg()) {
      if (X86II::isX86_64ExtendedReg(MO.getReg()))
        VEX_R = 0x0;
      if (X86II::is32ExtendedReg(MO.getReg()))
        EVEX_R2 = 0x0;
    }
    break;
  }
  case X86II::MRMSrcMem:
    // MRMSrcMem instruction forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(VEX_I8IMM)
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
    //  dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_R = 0x0;
    if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      EVEX_R2 = 0x0;
    CurOp++;

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;
    if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
                               X86::AddrIndexReg).getReg()))
      EVEX_V2 = 0x0;

    if (HasVEX_4VOp3)
      // Instruction format for 4VOp3:
      //   src1(ModR/M), MemAddr, src3(VEX_4V)
      // CurOp points to start of the MemoryOperand,
      //   it skips TIED_TO operands if exist, then increments past src1.
      // CurOp + X86::AddrNumOperands will point to src3.
      VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
    break;
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {
    // MRM[0-9]m instruction forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
      VEX_B = 0x0;
    if (X86II::isX86_64ExtendedReg(
               MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
      VEX_X = 0x0;
    break;
  }
  case X86II::MRMSrcReg:
    // MRMSrcReg instruction forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
    //  dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_R = 0x0;
    if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      EVEX_R2 = 0x0;
    CurOp++;

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    if (HasMemOp4) // Skip second register source (encoded in I8IMM)
      CurOp++;

    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_B = 0x0;
    if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_X = 0x0;
    CurOp++;
    if (HasVEX_4VOp3)
      VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
    if (EVEX_b) {
      if (HasEVEX_RC) {
        unsigned RcOperand = NumOps-1;
        assert(RcOperand >= CurOp);
        EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3;
      }
      EncodeRC = true;
    }
    break;
  case X86II::MRMDestReg:
    // MRMDestReg instruction forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_B = 0x0;
    if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_X = 0x0;
    CurOp++;

    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }

    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_R = 0x0;
    if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      EVEX_R2 = 0x0;
    if (EVEX_b)
      EncodeRC = true;
    break;
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    // MRM0r-MRM7r instruction forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V) {
      VEX_4V = getVEXRegisterEncoding(MI, CurOp);
      if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
        EVEX_V2 = 0x0;
      CurOp++;
    }
    if (HasEVEX_K)
      EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);

    if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_B = 0x0;
    if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
      VEX_X = 0x0;
    break;
  }

  if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
    // VEX opcode prefix can have 2 or 3 bytes
    //
    //  3 bytes:
    //    +-----+ +--------------+ +-------------------+
    //    | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
    //  2 bytes:
    //    +-----+ +-------------------+
    //    | C5h | | R | vvvv | L | pp |
    //    +-----+ +-------------------+
    //
    //  XOP uses a similar prefix:
    //    +-----+ +--------------+ +-------------------+
    //    | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
    //    +-----+ +--------------+ +-------------------+
    unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);

    // Can we use the 2 byte VEX prefix?
    if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
      EmitByte(0xC5, CurByte, OS);
      EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
      return;
    }

    // 3 byte VEX prefix
    EmitByte(Encoding == X86II::XOP ? 0x8F : 0xC4, CurByte, OS);
    EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
    EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
  } else {
    assert(Encoding == X86II::EVEX && "unknown encoding!");
    // EVEX opcode prefix can have 4 bytes
    //
    // +-----+ +--------------+ +-------------------+ +------------------------+
    // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
    // +-----+ +--------------+ +-------------------+ +------------------------+
    assert((VEX_5M & 0x3) == VEX_5M
           && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");

    VEX_5M &= 0x3;

    EmitByte(0x62, CurByte, OS);
    EmitByte((VEX_R   << 7) |
             (VEX_X   << 6) |
             (VEX_B   << 5) |
             (EVEX_R2 << 4) |
             VEX_5M, CurByte, OS);
    EmitByte((VEX_W   << 7) |
             (VEX_4V  << 3) |
             (EVEX_U  << 2) |
             VEX_PP, CurByte, OS);
    if (EncodeRC)
      EmitByte((EVEX_z  << 7) |
               (EVEX_rc << 5) |
               (EVEX_b  << 4) |
               (EVEX_V2 << 3) |
               EVEX_aaa, CurByte, OS);
    else
      EmitByte((EVEX_z  << 7) |
               (EVEX_L2 << 6) |
               (VEX_L   << 5) |
               (EVEX_b  << 4) |
               (EVEX_V2 << 3) |
               EVEX_aaa, CurByte, OS);
  }
}
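
// Worked example (a sketch): "vaddps %xmm3, %xmm2, %xmm1" needs no REX.X,
// REX.B or VEX.W and lives in the 0F map (VEX_5M == 1), so the 2-byte form
// applies: C5, then (VEX_R << 7) | LastByte with vvvv = (~2) & 0xf = 0b1101,
// giving C5 E8, followed by the opcode 58 and ModRM CB.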

/// DetermineREXPrefix - Determine if the MCInst has to be encoded with an
/// X86-64 REX prefix which specifies 1) 64-bit instructions, 2) non-default
/// operand size, and 3) use of X86-64 extended registers.
static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                                   const MCInstrDesc &Desc) {
  unsigned REX = 0;
  if (TSFlags & X86II::REX_W)
    REX |= 1 << 3; // set REX.W

  if (MI.getNumOperands() == 0) return REX;

  unsigned NumOps = MI.getNumOperands();
  // FIXME: MCInst should explicitize the two-addrness.
  bool isTwoAddr = NumOps > 1 &&
                      Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;

  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
  unsigned i = isTwoAddr ? 1 : 0;
  for (; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (!X86II::isX86_64NonExtLowByteReg(Reg)) continue;
    // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
    // that returns non-zero.
    REX |= 0x40; // REX fixed encoding prefix
    break;
  }

  switch (TSFlags & X86II::FormMask) {
  case X86II::MRMSrcReg:
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 2; // set REX.R
    i = isTwoAddr ? 2 : 1;
    for (; i != NumOps; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
        REX |= 1 << 0; // set REX.B
    }
    break;
  case X86II::MRMSrcMem: {
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 2; // set REX.R
    unsigned Bit = 0;
    i = isTwoAddr ? 2 : 1;
    for (; i != NumOps; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        if (X86II::isX86_64ExtendedReg(MO.getReg()))
          REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
        Bit++;
      }
    }
    break;
  }
  case X86II::MRMXm:
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
  case X86II::MRMDestMem: {
    unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
    i = isTwoAddr ? 1 : 0;
    if (NumOps > e && MI.getOperand(e).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
      REX |= 1 << 2; // set REX.R
    unsigned Bit = 0;
    for (; i != e; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        if (X86II::isX86_64ExtendedReg(MO.getReg()))
          REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
        Bit++;
      }
    }
    break;
  }
  default:
    if (MI.getOperand(0).isReg() &&
        X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
      REX |= 1 << 0; // set REX.B
    i = isTwoAddr ? 2 : 1;
    for (unsigned e = NumOps; i != e; ++i) {
      const MCOperand &MO = MI.getOperand(i);
      if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
        REX |= 1 << 2; // set REX.R
    }
    break;
  }
  return REX;
}
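
// Worked example for DetermineREXPrefix (a sketch): "mov %r8, (%rax)" is a
// 64-bit operation whose ModRM reg field names the extended register R8, so
// REX.W and REX.R are set and the emitted prefix byte is
// 0x40 | (1 << 3) | (1 << 2) = 0x4C.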

/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
                                                 unsigned SegOperand,
                                                 const MCInst &MI,
                                                 raw_ostream &OS) const {
  // Check for explicit segment override on memory operand.
  switch (MI.getOperand(SegOperand).getReg()) {
  default: llvm_unreachable("Unknown segment register!");
  case 0: break;
  case X86::CS: EmitByte(0x2E, CurByte, OS); break;
  case X86::SS: EmitByte(0x36, CurByte, OS); break;
  case X86::DS: EmitByte(0x3E, CurByte, OS); break;
  case X86::ES: EmitByte(0x26, CurByte, OS); break;
  case X86::FS: EmitByte(0x64, CurByte, OS); break;
  case X86::GS: EmitByte(0x65, CurByte, OS); break;
  }
}

/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present. If
/// not present, it is -1.
void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                        int MemOperand, const MCInst &MI,
                                        const MCInstrDesc &Desc,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &OS) const {

  // Emit the operand size opcode prefix as needed.
  if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
                                                         : X86II::OpSize16))
    EmitByte(0x66, CurByte, OS);

  // Emit the LOCK opcode prefix.
  if (TSFlags & X86II::LOCK)
    EmitByte(0xF0, CurByte, OS);

  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD:   // 66
    EmitByte(0x66, CurByte, OS);
    break;
  case X86II::XS:   // F3
    EmitByte(0xF3, CurByte, OS);
    break;
  case X86II::XD:   // F2
    EmitByte(0xF2, CurByte, OS);
    break;
  }

  // Handle REX prefix.
  // FIXME: Can this come before F2 etc to simplify emission?
  if (is64BitMode(STI)) {
    if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
      EmitByte(0x40 | REX, CurByte, OS);
  }

  // 0x0F escape code must be emitted just before the opcode.
  switch (TSFlags & X86II::OpMapMask) {
  case X86II::TB:  // Two-byte opcode map
  case X86II::T8:  // 0F 38
  case X86II::TA:  // 0F 3A
    EmitByte(0x0F, CurByte, OS);
    break;
  }

  switch (TSFlags & X86II::OpMapMask) {
  case X86II::T8:    // 0F 38
    EmitByte(0x38, CurByte, OS);
    break;
  case X86II::TA:    // 0F 3A
    EmitByte(0x3A, CurByte, OS);
    break;
  }
}
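
// Worked example for EmitOpcodePrefix (a sketch): "addw %ax, %bx" assembled
// in 32-bit mode is an OpSize16 instruction, so the 0x66 operand-size
// override is emitted before the opcode, giving 66 01 C3.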

void X86MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;

  // Pseudo instructions don't get encoded.
  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
    return;

  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  // Keep track of the current byte being emitted.
  unsigned CurByte = 0;

  // Encoding type for this instruction.
  uint64_t Encoding = TSFlags & X86II::EncodingMask;

  // Does this instruction use the VEX.VVVV field?
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
  bool HasMemOp4 = TSFlags & X86II::MemOp4;
  const unsigned MemOp4_I8IMMOperand = 2;

  // Does this instruction use the EVEX.aaa field?
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
  if (MemoryOperand != -1) MemoryOperand += CurOp;

  // Emit segment override opcode prefix as needed.
  if (MemoryOperand >= 0)
    EmitSegmentOverridePrefix(CurByte, MemoryOperand+X86::AddrSegmentReg,
                              MI, OS);

  // Emit the repeat opcode prefix as needed.
  if (TSFlags & X86II::REP)
    EmitByte(0xF3, CurByte, OS);

  // Emit the address size opcode prefix as needed.
  bool need_address_override;
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
      (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
      (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
    need_address_override = true;
  } else if (MemoryOperand < 0) {
    need_address_override = false;
  } else if (is64BitMode(STI)) {
    assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
    need_address_override = Is32BitMemOperand(MI, MemoryOperand);
  } else if (is32BitMode(STI)) {
    assert(!Is64BitMemOperand(MI, MemoryOperand));
    need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
  } else {
    assert(is16BitMode(STI));
    assert(!Is64BitMemOperand(MI, MemoryOperand));
    need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
  }

  if (need_address_override)
    EmitByte(0x67, CurByte, OS);

  if (Encoding == 0)
    EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
  else
    EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);

  unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if (TSFlags & X86II::Has3DNow0F0FOpcode)
    BaseOpcode = 0x0F;   // Weird 3DNow! encoding.

  unsigned SrcRegNum = 0;
  switch (TSFlags & X86II::FormMask) {
  default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
    llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
  case X86II::RawFrmDstSrc: {
    unsigned siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(2).getReg() != X86::DS)
      EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::ESI) ||
        (is32BitMode(STI) && siReg == X86::SI))
      EmitByte(0x67, CurByte, OS);
    CurOp += 3; // Consume operands.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::RawFrmSrc: {
    unsigned siReg = MI.getOperand(0).getReg();
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(1).getReg() != X86::DS)
      EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::ESI) ||
        (is32BitMode(STI) && siReg == X86::SI))
      EmitByte(0x67, CurByte, OS);
    CurOp += 2; // Consume operands.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::RawFrmDst: {
    unsigned siReg = MI.getOperand(0).getReg();
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::EDI) ||
        (is32BitMode(STI) && siReg == X86::DI))
      EmitByte(0x67, CurByte, OS);
    ++CurOp; // Consume operand.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
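
  // Worked example for the AdSize handling above (a sketch): in 64-bit mode
  // a string move written with 32-bit pointer registers, e.g.
  // "movsl (%esi), (%edi)", takes the RawFrmDstSrc path and gets the 0x67
  // address-size override, encoding as 67 A5.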
  case X86II::RawFrm:
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  case X86II::RawFrmMemOffs:
    // Emit segment override opcode prefix as needed.
    EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    ++CurOp; // skip segment operand
    break;
  case X86II::RawFrmImm8:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
                  OS, Fixups);
    break;
  case X86II::RawFrmImm16:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
                  OS, Fixups);
    break;

  case X86II::AddRegFrm:
    EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
    break;

  case X86II::MRMDestReg:
    EmitByte(BaseOpcode, CurByte, OS);
    SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(CurOp),
                     GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;

  case X86II::MRMDestMem:
    EmitByte(BaseOpcode, CurByte, OS);
    SrcRegNum = CurOp + X86::AddrNumOperands;

    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitMemModRMByte(MI, CurOp,
                     GetX86RegNum(MI.getOperand(SrcRegNum)),
                     TSFlags, CurByte, OS, Fixups, STI);
    CurOp = SrcRegNum + 1;
    break;

  case X86II::MRMSrcReg:
    EmitByte(BaseOpcode, CurByte, OS);
    SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      SrcRegNum++;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);

    // 2 operands skipped with HasMemOp4, compensate accordingly
    CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
    if (HasVEX_4VOp3)
      ++CurOp;
    // do not count the rounding control operand
    if (HasEVEX_RC)
      NumOps--;
    break;

  case X86II::MRMSrcMem: {
    int AddrOperands = X86::AddrNumOperands;
    unsigned FirstMemOp = CurOp+1;

    if (HasEVEX_K) { // Skip writemask
      ++AddrOperands;
      ++FirstMemOp;
    }

    if (HasVEX_4V) {
      ++AddrOperands;
      ++FirstMemOp;  // Skip the register source (which is encoded in VEX_VVVV).
1381 } 1382 if (HasMemOp4) // Skip second register source (encoded in I8IMM) 1383 ++FirstMemOp; 1384 1385 EmitByte(BaseOpcode, CurByte, OS); 1386 1387 EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)), 1388 TSFlags, CurByte, OS, Fixups, STI); 1389 CurOp += AddrOperands + 1; 1390 if (HasVEX_4VOp3) 1391 ++CurOp; 1392 break; 1393 } 1394 1395 case X86II::MRMXr: 1396 case X86II::MRM0r: case X86II::MRM1r: 1397 case X86II::MRM2r: case X86II::MRM3r: 1398 case X86II::MRM4r: case X86II::MRM5r: 1399 case X86II::MRM6r: case X86II::MRM7r: { 1400 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV). 1401 ++CurOp; 1402 if (HasEVEX_K) // Skip writemask 1403 ++CurOp; 1404 EmitByte(BaseOpcode, CurByte, OS); 1405 uint64_t Form = TSFlags & X86II::FormMask; 1406 EmitRegModRMByte(MI.getOperand(CurOp++), 1407 (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r, 1408 CurByte, OS); 1409 break; 1410 } 1411 1412 case X86II::MRMXm: 1413 case X86II::MRM0m: case X86II::MRM1m: 1414 case X86II::MRM2m: case X86II::MRM3m: 1415 case X86II::MRM4m: case X86II::MRM5m: 1416 case X86II::MRM6m: case X86II::MRM7m: { 1417 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV). 1418 ++CurOp; 1419 if (HasEVEX_K) // Skip writemask 1420 ++CurOp; 1421 EmitByte(BaseOpcode, CurByte, OS); 1422 uint64_t Form = TSFlags & X86II::FormMask; 1423 EmitMemModRMByte(MI, CurOp, (Form == X86II::MRMXm) ? 0 : Form-X86II::MRM0m, 1424 TSFlags, CurByte, OS, Fixups, STI); 1425 CurOp += X86::AddrNumOperands; 1426 break; 1427 } 1428 case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2: 1429 case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8: 1430 case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB: 1431 case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1: 1432 case X86II::MRM_D4: case X86II::MRM_D5: case X86II::MRM_D6: 1433 case X86II::MRM_D7: case X86II::MRM_D8: case X86II::MRM_D9: 1434 case X86II::MRM_DA: case X86II::MRM_DB: case X86II::MRM_DC: 1435 case X86II::MRM_DD: case X86II::MRM_DE: case X86II::MRM_DF: 1436 case X86II::MRM_E0: case X86II::MRM_E1: case X86II::MRM_E2: 1437 case X86II::MRM_E3: case X86II::MRM_E4: case X86II::MRM_E5: 1438 case X86II::MRM_E8: case X86II::MRM_E9: case X86II::MRM_EA: 1439 case X86II::MRM_EB: case X86II::MRM_EC: case X86II::MRM_ED: 1440 case X86II::MRM_EE: case X86II::MRM_F0: case X86II::MRM_F1: 1441 case X86II::MRM_F2: case X86II::MRM_F3: case X86II::MRM_F4: 1442 case X86II::MRM_F5: case X86II::MRM_F6: case X86II::MRM_F7: 1443 case X86II::MRM_F8: case X86II::MRM_F9: case X86II::MRM_FA: 1444 case X86II::MRM_FB: case X86II::MRM_FC: case X86II::MRM_FD: 1445 case X86II::MRM_FE: case X86II::MRM_FF: 1446 EmitByte(BaseOpcode, CurByte, OS); 1447 1448 unsigned char MRM; 1449 switch (TSFlags & X86II::FormMask) { 1450 default: llvm_unreachable("Invalid Form"); 1451 case X86II::MRM_C0: MRM = 0xC0; break; 1452 case X86II::MRM_C1: MRM = 0xC1; break; 1453 case X86II::MRM_C2: MRM = 0xC2; break; 1454 case X86II::MRM_C3: MRM = 0xC3; break; 1455 case X86II::MRM_C4: MRM = 0xC4; break; 1456 case X86II::MRM_C8: MRM = 0xC8; break; 1457 case X86II::MRM_C9: MRM = 0xC9; break; 1458 case X86II::MRM_CA: MRM = 0xCA; break; 1459 case X86II::MRM_CB: MRM = 0xCB; break; 1460 case X86II::MRM_CF: MRM = 0xCF; break; 1461 case X86II::MRM_D0: MRM = 0xD0; break; 1462 case X86II::MRM_D1: MRM = 0xD1; break; 1463 case X86II::MRM_D4: MRM = 0xD4; break; 1464 case X86II::MRM_D5: MRM = 0xD5; break; 1465 case X86II::MRM_D6: MRM = 0xD6; break; 1466 case X86II::MRM_D7: MRM = 0xD7; break; 1467 case 
    unsigned char MRM;
    switch (TSFlags & X86II::FormMask) {
    default: llvm_unreachable("Invalid Form");
    case X86II::MRM_C0: MRM = 0xC0; break;
    case X86II::MRM_C1: MRM = 0xC1; break;
    case X86II::MRM_C2: MRM = 0xC2; break;
    case X86II::MRM_C3: MRM = 0xC3; break;
    case X86II::MRM_C4: MRM = 0xC4; break;
    case X86II::MRM_C8: MRM = 0xC8; break;
    case X86II::MRM_C9: MRM = 0xC9; break;
    case X86II::MRM_CA: MRM = 0xCA; break;
    case X86II::MRM_CB: MRM = 0xCB; break;
    case X86II::MRM_CF: MRM = 0xCF; break;
    case X86II::MRM_D0: MRM = 0xD0; break;
    case X86II::MRM_D1: MRM = 0xD1; break;
    case X86II::MRM_D4: MRM = 0xD4; break;
    case X86II::MRM_D5: MRM = 0xD5; break;
    case X86II::MRM_D6: MRM = 0xD6; break;
    case X86II::MRM_D7: MRM = 0xD7; break;
    case X86II::MRM_D8: MRM = 0xD8; break;
    case X86II::MRM_D9: MRM = 0xD9; break;
    case X86II::MRM_DA: MRM = 0xDA; break;
    case X86II::MRM_DB: MRM = 0xDB; break;
    case X86II::MRM_DC: MRM = 0xDC; break;
    case X86II::MRM_DD: MRM = 0xDD; break;
    case X86II::MRM_DE: MRM = 0xDE; break;
    case X86II::MRM_DF: MRM = 0xDF; break;
    case X86II::MRM_E0: MRM = 0xE0; break;
    case X86II::MRM_E1: MRM = 0xE1; break;
    case X86II::MRM_E2: MRM = 0xE2; break;
    case X86II::MRM_E3: MRM = 0xE3; break;
    case X86II::MRM_E4: MRM = 0xE4; break;
    case X86II::MRM_E5: MRM = 0xE5; break;
    case X86II::MRM_E8: MRM = 0xE8; break;
    case X86II::MRM_E9: MRM = 0xE9; break;
    case X86II::MRM_EA: MRM = 0xEA; break;
    case X86II::MRM_EB: MRM = 0xEB; break;
    case X86II::MRM_EC: MRM = 0xEC; break;
    case X86II::MRM_ED: MRM = 0xED; break;
    case X86II::MRM_EE: MRM = 0xEE; break;
    case X86II::MRM_F0: MRM = 0xF0; break;
    case X86II::MRM_F1: MRM = 0xF1; break;
    case X86II::MRM_F2: MRM = 0xF2; break;
    case X86II::MRM_F3: MRM = 0xF3; break;
    case X86II::MRM_F4: MRM = 0xF4; break;
    case X86II::MRM_F5: MRM = 0xF5; break;
    case X86II::MRM_F6: MRM = 0xF6; break;
    case X86II::MRM_F7: MRM = 0xF7; break;
    case X86II::MRM_F8: MRM = 0xF8; break;
    case X86II::MRM_F9: MRM = 0xF9; break;
    case X86II::MRM_FA: MRM = 0xFA; break;
    case X86II::MRM_FB: MRM = 0xFB; break;
    case X86II::MRM_FC: MRM = 0xFC; break;
    case X86II::MRM_FD: MRM = 0xFD; break;
    case X86II::MRM_FE: MRM = 0xFE; break;
    case X86II::MRM_FF: MRM = 0xFF; break;
    }
    EmitByte(MRM, CurByte, OS);
    break;
  }

  // If there is a remaining operand, it must be a trailing immediate. Emit it
  // according to the right size for the instruction. Some instructions
  // (SSE4a extrq and insertq) have two trailing immediates.
  while (CurOp != NumOps && NumOps - CurOp <= 2) {
    // The last source register of a 4 operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte.
    if (TSFlags & X86II::VEX_I8IMM) {
      const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
                                                    : CurOp);
      ++CurOp;
      unsigned RegNum = GetX86RegNum(MO) << 4;
      if (X86II::isX86_64ExtendedReg(MO.getReg()))
        RegNum |= 1 << 7;
      // If there is an additional 5th operand it must be an immediate, which
      // is encoded in bits[3:0]
      if (CurOp != NumOps) {
        const MCOperand &MIMM = MI.getOperand(CurOp++);
        if (MIMM.isImm()) {
          unsigned Val = MIMM.getImm();
          assert(Val < 16 && "Immediate operand value out of range");
          RegNum |= Val;
        }
      }
      EmitImmediate(MCOperand::CreateImm(RegNum), MI.getLoc(), 1, FK_Data_1,
                    CurByte, OS, Fixups);
    } else {
      EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                    CurByte, OS, Fixups);
    }
  }

  if (TSFlags & X86II::Has3DNow0F0FOpcode)
    EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);

#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}