1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- 2 * vim: set ts=8 sts=2 et sw=2 tw=80: 3 * This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 #ifndef jit_x64_BaseAssembler_x64_h 8 #define jit_x64_BaseAssembler_x64_h 9 10 #include "jit/x86-shared/BaseAssembler-x86-shared.h" 11 12 namespace js { 13 namespace jit { 14 15 namespace X86Encoding { 16 17 class BaseAssemblerX64 : public BaseAssembler { 18 public: 19 // Arithmetic operations: 20 addq_rr(RegisterID src,RegisterID dst)21 void addq_rr(RegisterID src, RegisterID dst) { 22 spew("addq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 23 m_formatter.oneByteOp64(OP_ADD_GvEv, src, dst); 24 } 25 addq_mr(int32_t offset,RegisterID base,RegisterID dst)26 void addq_mr(int32_t offset, RegisterID base, RegisterID dst) { 27 spew("addq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst)); 28 m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, dst); 29 } 30 addq_mr(const void * addr,RegisterID dst)31 void addq_mr(const void* addr, RegisterID dst) { 32 spew("addq %p, %s", addr, GPReg64Name(dst)); 33 m_formatter.oneByteOp64(OP_ADD_GvEv, addr, dst); 34 } 35 addq_mr(int32_t offset,RegisterID base,RegisterID index,int scale,RegisterID dst)36 void addq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, 37 RegisterID dst) { 38 spew("addq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), 39 GPReg64Name(dst)); 40 m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, index, scale, dst); 41 } 42 addq_rm(RegisterID src,int32_t offset,RegisterID base)43 void addq_rm(RegisterID src, int32_t offset, RegisterID base) { 44 spew("addq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base)); 45 m_formatter.oneByteOp64(OP_ADD_EvGv, offset, base, src); 46 } 47 addq_rm(RegisterID src,int32_t offset,RegisterID base,RegisterID index,int scale)48 
void addq_rm(RegisterID src, int32_t offset, RegisterID base, 49 RegisterID index, int scale) { 50 spew("addq %s, " MEM_obs, GPReg64Name(src), 51 ADDR_obs(offset, base, index, scale)); 52 m_formatter.oneByteOp64(OP_ADD_EvGv, offset, base, index, scale, src); 53 } 54 addq_ir(int32_t imm,RegisterID dst)55 void addq_ir(int32_t imm, RegisterID dst) { 56 spew("addq $%d, %s", imm, GPReg64Name(dst)); 57 if (CAN_SIGN_EXTEND_8_32(imm)) { 58 m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD); 59 m_formatter.immediate8s(imm); 60 } else { 61 if (dst == rax) { 62 m_formatter.oneByteOp64(OP_ADD_EAXIv); 63 } else { 64 m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD); 65 } 66 m_formatter.immediate32(imm); 67 } 68 } 69 addq_i32r(int32_t imm,RegisterID dst)70 void addq_i32r(int32_t imm, RegisterID dst) { 71 // 32-bit immediate always, for patching. 72 spew("addq $0x%04x, %s", uint32_t(imm), GPReg64Name(dst)); 73 if (dst == rax) { 74 m_formatter.oneByteOp64(OP_ADD_EAXIv); 75 } else { 76 m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD); 77 } 78 m_formatter.immediate32(imm); 79 } 80 addq_im(int32_t imm,int32_t offset,RegisterID base)81 void addq_im(int32_t imm, int32_t offset, RegisterID base) { 82 spew("addq $%d, " MEM_ob, imm, ADDR_ob(offset, base)); 83 if (CAN_SIGN_EXTEND_8_32(imm)) { 84 m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_ADD); 85 m_formatter.immediate8s(imm); 86 } else { 87 m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD); 88 m_formatter.immediate32(imm); 89 } 90 } 91 addq_im(int32_t imm,const void * addr)92 void addq_im(int32_t imm, const void* addr) { 93 spew("addq $%d, %p", imm, addr); 94 if (CAN_SIGN_EXTEND_8_32(imm)) { 95 m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD); 96 m_formatter.immediate8s(imm); 97 } else { 98 m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD); 99 m_formatter.immediate32(imm); 100 } 101 } 102 andq_rr(RegisterID src,RegisterID dst)103 void 
andq_rr(RegisterID src, RegisterID dst) { 104 spew("andq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 105 m_formatter.oneByteOp64(OP_AND_GvEv, src, dst); 106 } 107 andq_mr(int32_t offset,RegisterID base,RegisterID dst)108 void andq_mr(int32_t offset, RegisterID base, RegisterID dst) { 109 spew("andq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst)); 110 m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, dst); 111 } 112 andq_mr(int32_t offset,RegisterID base,RegisterID index,int scale,RegisterID dst)113 void andq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, 114 RegisterID dst) { 115 spew("andq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), 116 GPReg64Name(dst)); 117 m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, index, scale, dst); 118 } 119 andq_mr(const void * addr,RegisterID dst)120 void andq_mr(const void* addr, RegisterID dst) { 121 spew("andq %p, %s", addr, GPReg64Name(dst)); 122 m_formatter.oneByteOp64(OP_AND_GvEv, addr, dst); 123 } 124 andq_rm(RegisterID src,int32_t offset,RegisterID base)125 void andq_rm(RegisterID src, int32_t offset, RegisterID base) { 126 spew("andq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base)); 127 m_formatter.oneByteOp64(OP_AND_EvGv, offset, base, src); 128 } 129 andq_rm(RegisterID src,int32_t offset,RegisterID base,RegisterID index,int scale)130 void andq_rm(RegisterID src, int32_t offset, RegisterID base, 131 RegisterID index, int scale) { 132 spew("andq %s, " MEM_obs, GPReg64Name(src), 133 ADDR_obs(offset, base, index, scale)); 134 m_formatter.oneByteOp64(OP_AND_EvGv, offset, base, index, scale, src); 135 } 136 orq_mr(int32_t offset,RegisterID base,RegisterID dst)137 void orq_mr(int32_t offset, RegisterID base, RegisterID dst) { 138 spew("orq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst)); 139 m_formatter.oneByteOp64(OP_OR_GvEv, offset, base, dst); 140 } 141 orq_mr(const void * addr,RegisterID dst)142 void orq_mr(const void* addr, RegisterID dst) { 143 spew("orq %p, %s", 
addr, GPReg64Name(dst)); 144 m_formatter.oneByteOp64(OP_OR_GvEv, addr, dst); 145 } 146 orq_rm(RegisterID src,int32_t offset,RegisterID base)147 void orq_rm(RegisterID src, int32_t offset, RegisterID base) { 148 spew("orq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base)); 149 m_formatter.oneByteOp64(OP_OR_EvGv, offset, base, src); 150 } 151 orq_rm(RegisterID src,int32_t offset,RegisterID base,RegisterID index,int scale)152 void orq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index, 153 int scale) { 154 spew("orq %s, " MEM_obs, GPReg64Name(src), 155 ADDR_obs(offset, base, index, scale)); 156 m_formatter.oneByteOp64(OP_OR_EvGv, offset, base, index, scale, src); 157 } 158 xorq_mr(int32_t offset,RegisterID base,RegisterID dst)159 void xorq_mr(int32_t offset, RegisterID base, RegisterID dst) { 160 spew("xorq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst)); 161 m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, dst); 162 } 163 xorq_mr(int32_t offset,RegisterID base,RegisterID index,int scale,RegisterID dst)164 void xorq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, 165 RegisterID dst) { 166 spew("xorq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), 167 GPReg64Name(dst)); 168 m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, index, scale, dst); 169 } 170 xorq_mr(const void * addr,RegisterID dst)171 void xorq_mr(const void* addr, RegisterID dst) { 172 spew("xorq %p, %s", addr, GPReg64Name(dst)); 173 m_formatter.oneByteOp64(OP_XOR_GvEv, addr, dst); 174 } 175 xorq_rm(RegisterID src,int32_t offset,RegisterID base)176 void xorq_rm(RegisterID src, int32_t offset, RegisterID base) { 177 spew("xorq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base)); 178 m_formatter.oneByteOp64(OP_XOR_EvGv, offset, base, src); 179 } 180 xorq_rm(RegisterID src,int32_t offset,RegisterID base,RegisterID index,int scale)181 void xorq_rm(RegisterID src, int32_t offset, RegisterID base, 182 RegisterID index, int scale) { 183 spew("xorq %s, 
" MEM_obs, GPReg64Name(src), 184 ADDR_obs(offset, base, index, scale)); 185 m_formatter.oneByteOp64(OP_XOR_EvGv, offset, base, index, scale, src); 186 } 187 bswapq_r(RegisterID dst)188 void bswapq_r(RegisterID dst) { 189 spew("bswapq %s", GPReg64Name(dst)); 190 m_formatter.twoByteOp64(OP2_BSWAP, dst); 191 } 192 bsrq_rr(RegisterID src,RegisterID dst)193 void bsrq_rr(RegisterID src, RegisterID dst) { 194 spew("bsrq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 195 m_formatter.twoByteOp64(OP2_BSR_GvEv, src, dst); 196 } 197 bsfq_rr(RegisterID src,RegisterID dst)198 void bsfq_rr(RegisterID src, RegisterID dst) { 199 spew("bsfq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 200 m_formatter.twoByteOp64(OP2_BSF_GvEv, src, dst); 201 } 202 lzcntq_rr(RegisterID src,RegisterID dst)203 void lzcntq_rr(RegisterID src, RegisterID dst) { 204 spew("lzcntq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 205 m_formatter.legacySSEPrefix(VEX_SS); 206 m_formatter.twoByteOp64(OP2_LZCNT_GvEv, src, dst); 207 } 208 tzcntq_rr(RegisterID src,RegisterID dst)209 void tzcntq_rr(RegisterID src, RegisterID dst) { 210 spew("tzcntq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 211 m_formatter.legacySSEPrefix(VEX_SS); 212 m_formatter.twoByteOp64(OP2_TZCNT_GvEv, src, dst); 213 } 214 popcntq_rr(RegisterID src,RegisterID dst)215 void popcntq_rr(RegisterID src, RegisterID dst) { 216 spew("popcntq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 217 m_formatter.legacySSEPrefix(VEX_SS); 218 m_formatter.twoByteOp64(OP2_POPCNT_GvEv, src, dst); 219 } 220 andq_ir(int32_t imm,RegisterID dst)221 void andq_ir(int32_t imm, RegisterID dst) { 222 spew("andq $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst)); 223 if (CAN_SIGN_EXTEND_8_32(imm)) { 224 m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_AND); 225 m_formatter.immediate8s(imm); 226 } else { 227 if (dst == rax) { 228 m_formatter.oneByteOp64(OP_AND_EAXIv); 229 } else { 230 m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_AND); 231 } 232 
m_formatter.immediate32(imm); 233 } 234 } 235 negq_r(RegisterID dst)236 void negq_r(RegisterID dst) { 237 spew("negq %s", GPReg64Name(dst)); 238 m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NEG); 239 } 240 orq_rr(RegisterID src,RegisterID dst)241 void orq_rr(RegisterID src, RegisterID dst) { 242 spew("orq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 243 m_formatter.oneByteOp64(OP_OR_GvEv, src, dst); 244 } 245 orq_ir(int32_t imm,RegisterID dst)246 void orq_ir(int32_t imm, RegisterID dst) { 247 spew("orq $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst)); 248 if (CAN_SIGN_EXTEND_8_32(imm)) { 249 m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_OR); 250 m_formatter.immediate8s(imm); 251 } else { 252 if (dst == rax) { 253 m_formatter.oneByteOp64(OP_OR_EAXIv); 254 } else { 255 m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_OR); 256 } 257 m_formatter.immediate32(imm); 258 } 259 } 260 notq_r(RegisterID dst)261 void notq_r(RegisterID dst) { 262 spew("notq %s", GPReg64Name(dst)); 263 m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NOT); 264 } 265 subq_rr(RegisterID src,RegisterID dst)266 void subq_rr(RegisterID src, RegisterID dst) { 267 spew("subq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 268 m_formatter.oneByteOp64(OP_SUB_GvEv, src, dst); 269 } 270 subq_rm(RegisterID src,int32_t offset,RegisterID base)271 void subq_rm(RegisterID src, int32_t offset, RegisterID base) { 272 spew("subq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base)); 273 m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, src); 274 } 275 subq_rm(RegisterID src,int32_t offset,RegisterID base,RegisterID index,int scale)276 void subq_rm(RegisterID src, int32_t offset, RegisterID base, 277 RegisterID index, int scale) { 278 spew("subq %s, " MEM_obs, GPReg64Name(src), 279 ADDR_obs(offset, base, index, scale)); 280 m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, index, scale, src); 281 } 282 subq_mr(int32_t offset,RegisterID base,RegisterID dst)283 void subq_mr(int32_t 
offset, RegisterID base, RegisterID dst) { 284 spew("subq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst)); 285 m_formatter.oneByteOp64(OP_SUB_GvEv, offset, base, dst); 286 } 287 subq_mr(const void * addr,RegisterID dst)288 void subq_mr(const void* addr, RegisterID dst) { 289 spew("subq %p, %s", addr, GPReg64Name(dst)); 290 m_formatter.oneByteOp64(OP_SUB_GvEv, addr, dst); 291 } 292 subq_ir(int32_t imm,RegisterID dst)293 void subq_ir(int32_t imm, RegisterID dst) { 294 spew("subq $%d, %s", imm, GPReg64Name(dst)); 295 if (CAN_SIGN_EXTEND_8_32(imm)) { 296 m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB); 297 m_formatter.immediate8s(imm); 298 } else { 299 if (dst == rax) { 300 m_formatter.oneByteOp64(OP_SUB_EAXIv); 301 } else { 302 m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB); 303 } 304 m_formatter.immediate32(imm); 305 } 306 } 307 xorq_rr(RegisterID src,RegisterID dst)308 void xorq_rr(RegisterID src, RegisterID dst) { 309 spew("xorq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 310 m_formatter.oneByteOp64(OP_XOR_GvEv, src, dst); 311 } 312 xorq_ir(int32_t imm,RegisterID dst)313 void xorq_ir(int32_t imm, RegisterID dst) { 314 spew("xorq $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst)); 315 if (CAN_SIGN_EXTEND_8_32(imm)) { 316 m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR); 317 m_formatter.immediate8s(imm); 318 } else { 319 if (dst == rax) { 320 m_formatter.oneByteOp64(OP_XOR_EAXIv); 321 } else { 322 m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR); 323 } 324 m_formatter.immediate32(imm); 325 } 326 } 327 sarq_CLr(RegisterID dst)328 void sarq_CLr(RegisterID dst) { 329 spew("sarq %%cl, %s", GPReg64Name(dst)); 330 m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SAR); 331 } 332 shlq_CLr(RegisterID dst)333 void shlq_CLr(RegisterID dst) { 334 spew("shlq %%cl, %s", GPReg64Name(dst)); 335 m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHL); 336 } 337 shrq_CLr(RegisterID dst)338 void shrq_CLr(RegisterID 
dst) { 339 spew("shrq %%cl, %s", GPReg64Name(dst)); 340 m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHR); 341 } 342 sarq_ir(int32_t imm,RegisterID dst)343 void sarq_ir(int32_t imm, RegisterID dst) { 344 MOZ_ASSERT(imm < 64); 345 spew("sarq $%d, %s", imm, GPReg64Name(dst)); 346 if (imm == 1) { 347 m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR); 348 } else { 349 m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR); 350 m_formatter.immediate8u(imm); 351 } 352 } 353 shlq_ir(int32_t imm,RegisterID dst)354 void shlq_ir(int32_t imm, RegisterID dst) { 355 MOZ_ASSERT(imm < 64); 356 spew("shlq $%d, %s", imm, GPReg64Name(dst)); 357 if (imm == 1) { 358 m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL); 359 } else { 360 m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL); 361 m_formatter.immediate8u(imm); 362 } 363 } 364 shrq_ir(int32_t imm,RegisterID dst)365 void shrq_ir(int32_t imm, RegisterID dst) { 366 MOZ_ASSERT(imm < 64); 367 spew("shrq $%d, %s", imm, GPReg64Name(dst)); 368 if (imm == 1) { 369 m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR); 370 } else { 371 m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR); 372 m_formatter.immediate8u(imm); 373 } 374 } 375 rolq_ir(int32_t imm,RegisterID dst)376 void rolq_ir(int32_t imm, RegisterID dst) { 377 MOZ_ASSERT(imm < 64); 378 spew("rolq $%d, %s", imm, GPReg64Name(dst)); 379 if (imm == 1) { 380 m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL); 381 } else { 382 m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL); 383 m_formatter.immediate8u(imm); 384 } 385 } rolq_CLr(RegisterID dst)386 void rolq_CLr(RegisterID dst) { 387 spew("rolq %%cl, %s", GPReg64Name(dst)); 388 m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROL); 389 } 390 rorq_ir(int32_t imm,RegisterID dst)391 void rorq_ir(int32_t imm, RegisterID dst) { 392 MOZ_ASSERT(imm < 64); 393 spew("rorq $%d, %s", imm, GPReg64Name(dst)); 394 if (imm == 1) { 395 
m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR); 396 } else { 397 m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR); 398 m_formatter.immediate8u(imm); 399 } 400 } rorq_CLr(RegisterID dst)401 void rorq_CLr(RegisterID dst) { 402 spew("rorq %%cl, %s", GPReg64Name(dst)); 403 m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROR); 404 } 405 imulq_rr(RegisterID src,RegisterID dst)406 void imulq_rr(RegisterID src, RegisterID dst) { 407 spew("imulq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 408 m_formatter.twoByteOp64(OP2_IMUL_GvEv, src, dst); 409 } 410 imulq_mr(int32_t offset,RegisterID base,RegisterID dst)411 void imulq_mr(int32_t offset, RegisterID base, RegisterID dst) { 412 spew("imulq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst)); 413 m_formatter.twoByteOp64(OP2_IMUL_GvEv, offset, base, dst); 414 } 415 imulq_ir(int32_t value,RegisterID src,RegisterID dst)416 void imulq_ir(int32_t value, RegisterID src, RegisterID dst) { 417 spew("imulq $%d, %s, %s", value, GPReg64Name(src), GPReg64Name(dst)); 418 if (CAN_SIGN_EXTEND_8_32(value)) { 419 m_formatter.oneByteOp64(OP_IMUL_GvEvIb, src, dst); 420 m_formatter.immediate8s(value); 421 } else { 422 m_formatter.oneByteOp64(OP_IMUL_GvEvIz, src, dst); 423 m_formatter.immediate32(value); 424 } 425 } 426 cqo()427 void cqo() { 428 spew("cqo "); 429 m_formatter.oneByteOp64(OP_CDQ); 430 } 431 idivq_r(RegisterID divisor)432 void idivq_r(RegisterID divisor) { 433 spew("idivq %s", GPReg64Name(divisor)); 434 m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV); 435 } 436 divq_r(RegisterID divisor)437 void divq_r(RegisterID divisor) { 438 spew("divq %s", GPReg64Name(divisor)); 439 m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV); 440 } 441 442 // Comparisons: 443 cmpq_rr(RegisterID rhs,RegisterID lhs)444 void cmpq_rr(RegisterID rhs, RegisterID lhs) { 445 spew("cmpq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs)); 446 m_formatter.oneByteOp64(OP_CMP_GvEv, rhs, lhs); 447 } 448 
cmpq_rm(RegisterID rhs,int32_t offset,RegisterID base)449 void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base) { 450 spew("cmpq %s, " MEM_ob, GPReg64Name(rhs), ADDR_ob(offset, base)); 451 m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, rhs); 452 } 453 cmpq_rm(RegisterID rhs,int32_t offset,RegisterID base,RegisterID index,int scale)454 void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base, 455 RegisterID index, int scale) { 456 spew("cmpq %s, " MEM_obs, GPReg64Name(rhs), 457 ADDR_obs(offset, base, index, scale)); 458 m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, index, scale, rhs); 459 } 460 cmpq_mr(int32_t offset,RegisterID base,RegisterID lhs)461 void cmpq_mr(int32_t offset, RegisterID base, RegisterID lhs) { 462 spew("cmpq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(lhs)); 463 m_formatter.oneByteOp64(OP_CMP_GvEv, offset, base, lhs); 464 } 465 cmpq_ir(int32_t rhs,RegisterID lhs)466 void cmpq_ir(int32_t rhs, RegisterID lhs) { 467 if (rhs == 0) { 468 testq_rr(lhs, lhs); 469 return; 470 } 471 472 spew("cmpq $0x%" PRIx64 ", %s", uint64_t(rhs), GPReg64Name(lhs)); 473 if (CAN_SIGN_EXTEND_8_32(rhs)) { 474 m_formatter.oneByteOp64(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP); 475 m_formatter.immediate8s(rhs); 476 } else { 477 if (lhs == rax) { 478 m_formatter.oneByteOp64(OP_CMP_EAXIv); 479 } else { 480 m_formatter.oneByteOp64(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP); 481 } 482 m_formatter.immediate32(rhs); 483 } 484 } 485 cmpq_im(int32_t rhs,int32_t offset,RegisterID base)486 void cmpq_im(int32_t rhs, int32_t offset, RegisterID base) { 487 spew("cmpq $0x%" PRIx64 ", " MEM_ob, uint64_t(rhs), 488 ADDR_ob(offset, base)); 489 if (CAN_SIGN_EXTEND_8_32(rhs)) { 490 m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP); 491 m_formatter.immediate8s(rhs); 492 } else { 493 m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP); 494 m_formatter.immediate32(rhs); 495 } 496 } 497 cmpq_im(int32_t rhs,int32_t offset,RegisterID base,RegisterID 
index,int scale)498 void cmpq_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index, 499 int scale) { 500 spew("cmpq $0x%x, " MEM_obs, uint32_t(rhs), 501 ADDR_obs(offset, base, index, scale)); 502 if (CAN_SIGN_EXTEND_8_32(rhs)) { 503 m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, index, scale, 504 GROUP1_OP_CMP); 505 m_formatter.immediate8s(rhs); 506 } else { 507 m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, index, scale, 508 GROUP1_OP_CMP); 509 m_formatter.immediate32(rhs); 510 } 511 } cmpq_im(int32_t rhs,const void * addr)512 void cmpq_im(int32_t rhs, const void* addr) { 513 spew("cmpq $0x%" PRIx64 ", %p", uint64_t(rhs), addr); 514 if (CAN_SIGN_EXTEND_8_32(rhs)) { 515 m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP); 516 m_formatter.immediate8s(rhs); 517 } else { 518 m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP); 519 m_formatter.immediate32(rhs); 520 } 521 } cmpq_rm(RegisterID rhs,const void * addr)522 void cmpq_rm(RegisterID rhs, const void* addr) { 523 spew("cmpq %s, %p", GPReg64Name(rhs), addr); 524 m_formatter.oneByteOp64(OP_CMP_EvGv, addr, rhs); 525 } 526 testq_rr(RegisterID rhs,RegisterID lhs)527 void testq_rr(RegisterID rhs, RegisterID lhs) { 528 spew("testq %s, %s", GPReg64Name(rhs), GPReg64Name(lhs)); 529 m_formatter.oneByteOp64(OP_TEST_EvGv, lhs, rhs); 530 } 531 testq_ir(int32_t rhs,RegisterID lhs)532 void testq_ir(int32_t rhs, RegisterID lhs) { 533 // If the mask fits in a 32-bit immediate, we can use testl with a 534 // 32-bit subreg. 
535 if (CAN_ZERO_EXTEND_32_64(rhs)) { 536 testl_ir(rhs, lhs); 537 return; 538 } 539 spew("testq $0x%" PRIx64 ", %s", uint64_t(rhs), GPReg64Name(lhs)); 540 if (lhs == rax) { 541 m_formatter.oneByteOp64(OP_TEST_EAXIv); 542 } else { 543 m_formatter.oneByteOp64(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST); 544 } 545 m_formatter.immediate32(rhs); 546 } 547 testq_i32m(int32_t rhs,int32_t offset,RegisterID base)548 void testq_i32m(int32_t rhs, int32_t offset, RegisterID base) { 549 spew("testq $0x%" PRIx64 ", " MEM_ob, uint64_t(rhs), 550 ADDR_ob(offset, base)); 551 m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, GROUP3_OP_TEST); 552 m_formatter.immediate32(rhs); 553 } 554 testq_i32m(int32_t rhs,int32_t offset,RegisterID base,RegisterID index,int scale)555 void testq_i32m(int32_t rhs, int32_t offset, RegisterID base, 556 RegisterID index, int scale) { 557 spew("testq $0x%4x, " MEM_obs, uint32_t(rhs), 558 ADDR_obs(offset, base, index, scale)); 559 m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, index, scale, 560 GROUP3_OP_TEST); 561 m_formatter.immediate32(rhs); 562 } 563 564 // Various move ops: 565 cmovCCq_rr(Condition cond,RegisterID src,RegisterID dst)566 void cmovCCq_rr(Condition cond, RegisterID src, RegisterID dst) { 567 spew("cmov%s %s, %s", CCName(cond), GPReg64Name(src), GPReg64Name(dst)); 568 m_formatter.twoByteOp64(cmovccOpcode(cond), src, dst); 569 } cmovCCq_mr(Condition cond,int32_t offset,RegisterID base,RegisterID dst)570 void cmovCCq_mr(Condition cond, int32_t offset, RegisterID base, 571 RegisterID dst) { 572 spew("cmov%s " MEM_ob ", %s", CCName(cond), ADDR_ob(offset, base), 573 GPReg64Name(dst)); 574 m_formatter.twoByteOp64(cmovccOpcode(cond), offset, base, dst); 575 } cmovCCq_mr(Condition cond,int32_t offset,RegisterID base,RegisterID index,int scale,RegisterID dst)576 void cmovCCq_mr(Condition cond, int32_t offset, RegisterID base, 577 RegisterID index, int scale, RegisterID dst) { 578 spew("cmov%s " MEM_obs ", %s", CCName(cond), 579 
ADDR_obs(offset, base, index, scale), GPReg64Name(dst)); 580 m_formatter.twoByteOp64(cmovccOpcode(cond), offset, base, index, scale, 581 dst); 582 } 583 cmpxchgq(RegisterID src,int32_t offset,RegisterID base)584 void cmpxchgq(RegisterID src, int32_t offset, RegisterID base) { 585 spew("cmpxchgq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base)); 586 m_formatter.twoByteOp64(OP2_CMPXCHG_GvEw, offset, base, src); 587 } 588 cmpxchgq(RegisterID src,int32_t offset,RegisterID base,RegisterID index,int scale)589 void cmpxchgq(RegisterID src, int32_t offset, RegisterID base, 590 RegisterID index, int scale) { 591 spew("cmpxchgq %s, " MEM_obs, GPReg64Name(src), 592 ADDR_obs(offset, base, index, scale)); 593 m_formatter.twoByteOp64(OP2_CMPXCHG_GvEw, offset, base, index, scale, src); 594 } 595 lock_xaddq_rm(RegisterID srcdest,int32_t offset,RegisterID base)596 void lock_xaddq_rm(RegisterID srcdest, int32_t offset, RegisterID base) { 597 spew("lock xaddq %s, " MEM_ob, GPReg64Name(srcdest), ADDR_ob(offset, base)); 598 m_formatter.oneByteOp(PRE_LOCK); 599 m_formatter.twoByteOp64(OP2_XADD_EvGv, offset, base, srcdest); 600 } 601 lock_xaddq_rm(RegisterID srcdest,int32_t offset,RegisterID base,RegisterID index,int scale)602 void lock_xaddq_rm(RegisterID srcdest, int32_t offset, RegisterID base, 603 RegisterID index, int scale) { 604 spew("lock xaddq %s, " MEM_obs, GPReg64Name(srcdest), 605 ADDR_obs(offset, base, index, scale)); 606 m_formatter.oneByteOp(PRE_LOCK); 607 m_formatter.twoByteOp64(OP2_XADD_EvGv, offset, base, index, scale, srcdest); 608 } 609 xchgq_rr(RegisterID src,RegisterID dst)610 void xchgq_rr(RegisterID src, RegisterID dst) { 611 spew("xchgq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 612 m_formatter.oneByteOp64(OP_XCHG_GvEv, src, dst); 613 } xchgq_rm(RegisterID src,int32_t offset,RegisterID base)614 void xchgq_rm(RegisterID src, int32_t offset, RegisterID base) { 615 spew("xchgq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base)); 616 
m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, src); 617 } xchgq_rm(RegisterID src,int32_t offset,RegisterID base,RegisterID index,int scale)618 void xchgq_rm(RegisterID src, int32_t offset, RegisterID base, 619 RegisterID index, int scale) { 620 spew("xchgq %s, " MEM_obs, GPReg64Name(src), 621 ADDR_obs(offset, base, index, scale)); 622 m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, index, scale, src); 623 } 624 movq_rr(RegisterID src,RegisterID dst)625 void movq_rr(RegisterID src, RegisterID dst) { 626 spew("movq %s, %s", GPReg64Name(src), GPReg64Name(dst)); 627 m_formatter.oneByteOp64(OP_MOV_GvEv, src, dst); 628 } 629 movq_rm(RegisterID src,int32_t offset,RegisterID base)630 void movq_rm(RegisterID src, int32_t offset, RegisterID base) { 631 spew("movq %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base)); 632 m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, src); 633 } 634 movq_rm_disp32(RegisterID src,int32_t offset,RegisterID base)635 void movq_rm_disp32(RegisterID src, int32_t offset, RegisterID base) { 636 spew("movq %s, " MEM_o32b, GPReg64Name(src), ADDR_o32b(offset, base)); 637 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, offset, base, src); 638 } 639 movq_rm(RegisterID src,int32_t offset,RegisterID base,RegisterID index,int scale)640 void movq_rm(RegisterID src, int32_t offset, RegisterID base, 641 RegisterID index, int scale) { 642 spew("movq %s, " MEM_obs, GPReg64Name(src), 643 ADDR_obs(offset, base, index, scale)); 644 m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, index, scale, src); 645 } 646 movq_rm(RegisterID src,const void * addr)647 void movq_rm(RegisterID src, const void* addr) { 648 if (src == rax && !IsAddressImmediate(addr)) { 649 movq_EAXm(addr); 650 return; 651 } 652 653 spew("movq %s, %p", GPReg64Name(src), addr); 654 m_formatter.oneByteOp64(OP_MOV_EvGv, addr, src); 655 } 656 movq_mEAX(const void * addr)657 void movq_mEAX(const void* addr) { 658 if (IsAddressImmediate(addr)) { 659 movq_mr(addr, rax); 660 return; 661 } 662 
663 spew("movq %p, %%rax", addr); 664 m_formatter.oneByteOp64(OP_MOV_EAXOv); 665 m_formatter.immediate64(reinterpret_cast<int64_t>(addr)); 666 } 667 movq_EAXm(const void * addr)668 void movq_EAXm(const void* addr) { 669 if (IsAddressImmediate(addr)) { 670 movq_rm(rax, addr); 671 return; 672 } 673 674 spew("movq %%rax, %p", addr); 675 m_formatter.oneByteOp64(OP_MOV_OvEAX); 676 m_formatter.immediate64(reinterpret_cast<int64_t>(addr)); 677 } 678 movq_mr(int32_t offset,RegisterID base,RegisterID dst)679 void movq_mr(int32_t offset, RegisterID base, RegisterID dst) { 680 spew("movq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst)); 681 m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, dst); 682 } 683 movq_mr_disp32(int32_t offset,RegisterID base,RegisterID dst)684 void movq_mr_disp32(int32_t offset, RegisterID base, RegisterID dst) { 685 spew("movq " MEM_o32b ", %s", ADDR_o32b(offset, base), 686 GPReg64Name(dst)); 687 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, offset, base, dst); 688 } 689 movq_mr(int32_t offset,RegisterID base,RegisterID index,int scale,RegisterID dst)690 void movq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, 691 RegisterID dst) { 692 spew("movq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), 693 GPReg64Name(dst)); 694 m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, index, scale, dst); 695 } 696 movq_mr(const void * addr,RegisterID dst)697 void movq_mr(const void* addr, RegisterID dst) { 698 if (dst == rax && !IsAddressImmediate(addr)) { 699 movq_mEAX(addr); 700 return; 701 } 702 703 spew("movq %p, %s", addr, GPReg64Name(dst)); 704 m_formatter.oneByteOp64(OP_MOV_GvEv, addr, dst); 705 } 706 leaq_mr(int32_t offset,RegisterID base,RegisterID index,int scale,RegisterID dst)707 void leaq_mr(int32_t offset, RegisterID base, RegisterID index, int scale, 708 RegisterID dst) { 709 spew("leaq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale), 710 GPReg64Name(dst)); 711 m_formatter.oneByteOp64(OP_LEA, offset, base, 
index, scale, dst); 712 } 713 movq_i32m(int32_t imm,int32_t offset,RegisterID base)714 void movq_i32m(int32_t imm, int32_t offset, RegisterID base) { 715 spew("movq $%d, " MEM_ob, imm, ADDR_ob(offset, base)); 716 m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, GROUP11_MOV); 717 m_formatter.immediate32(imm); 718 } movq_i32m(int32_t imm,int32_t offset,RegisterID base,RegisterID index,int scale)719 void movq_i32m(int32_t imm, int32_t offset, RegisterID base, RegisterID index, 720 int scale) { 721 spew("movq $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale)); 722 m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, index, scale, 723 GROUP11_MOV); 724 m_formatter.immediate32(imm); 725 } movq_i32m(int32_t imm,const void * addr)726 void movq_i32m(int32_t imm, const void* addr) { 727 spew("movq $%d, %p", imm, addr); 728 m_formatter.oneByteOp64(OP_GROUP11_EvIz, addr, GROUP11_MOV); 729 m_formatter.immediate32(imm); 730 } 731 732 // Note that this instruction sign-extends its 32-bit immediate field to 64 733 // bits and loads the 64-bit value into a 64-bit register. 734 // 735 // Note also that this is similar to the movl_i32r instruction, except that 736 // movl_i32r *zero*-extends its 32-bit immediate, and it has smaller code 737 // size, so it's preferred for values which could use either. 
  // Move a sign-extended 32-bit immediate into a 64-bit register.
  // (Uses the C7 /0 group-11 encoding: imm32 sign-extended to 64 bits.)
  void movq_i32r(int32_t imm, RegisterID dst) {
    spew("movq $%d, %s", imm, GPRegName(dst));
    m_formatter.oneByteOp64(OP_GROUP11_EvIz, dst, GROUP11_MOV);
    m_formatter.immediate32(imm);
  }

  // Move a full 64-bit immediate into a register (movabsq encoding).
  void movq_i64r(int64_t imm, RegisterID dst) {
    spew("movabsq $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
    m_formatter.immediate64(imm);
  }

  // Sign-extending loads/moves: byte -> quad.
  void movsbq_rr(RegisterID src, RegisterID dst) {
    spew("movsbq %s, %s", GPReg32Name(src), GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEb, src, dst);
  }
  void movsbq_mr(int32_t offset, RegisterID base, RegisterID dst) {
    spew("movsbq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, dst);
  }
  void movsbq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
                 RegisterID dst) {
    spew("movsbq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
         GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, index, scale, dst);
  }

  // Sign-extending loads/moves: word -> quad.
  void movswq_rr(RegisterID src, RegisterID dst) {
    spew("movswq %s, %s", GPReg32Name(src), GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEw, src, dst);
  }
  void movswq_mr(int32_t offset, RegisterID base, RegisterID dst) {
    spew("movswq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, dst);
  }
  void movswq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
                 RegisterID dst) {
    spew("movswq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
         GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, index, scale, dst);
  }

  // Sign-extending loads/moves: dword -> quad (movsxd).
  void movslq_rr(RegisterID src, RegisterID dst) {
    spew("movslq %s, %s", GPReg32Name(src), GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_MOVSXD_GvEv, src, dst);
  }
  void movslq_mr(int32_t offset, RegisterID base, RegisterID dst) {
    spew("movslq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, dst);
  }
  void movslq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
                 RegisterID dst) {
    spew("movslq " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
         GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, index, scale, dst);
  }

  // rip-relative loads/stores. The instruction is emitted first with a zero
  // displacement and the JmpSrc records m_formatter.size() afterwards, i.e.
  // the offset just past the 4-byte displacement field; presumably the caller
  // patches the displacement relative to this point later — note the spew is
  // deliberately issued after emission so it can print label.offset().
  [[nodiscard]] JmpSrc movl_ripr(RegisterID dst) {
    m_formatter.oneByteRipOp(OP_MOV_GvEv, 0, (RegisterID)dst);
    JmpSrc label(m_formatter.size());
    spew("movl " MEM_o32r ", %s", ADDR_o32r(label.offset()),
         GPReg32Name(dst));
    return label;
  }

  [[nodiscard]] JmpSrc movl_rrip(RegisterID src) {
    m_formatter.oneByteRipOp(OP_MOV_EvGv, 0, (RegisterID)src);
    JmpSrc label(m_formatter.size());
    spew("movl %s, " MEM_o32r "", GPReg32Name(src),
         ADDR_o32r(label.offset()));
    return label;
  }

  [[nodiscard]] JmpSrc movq_ripr(RegisterID dst) {
    m_formatter.oneByteRipOp64(OP_MOV_GvEv, 0, dst);
    JmpSrc label(m_formatter.size());
    spew("movq " MEM_o32r ", %s", ADDR_o32r(label.offset()),
         GPRegName(dst));
    return label;
  }

  [[nodiscard]] JmpSrc movq_rrip(RegisterID src) {
    m_formatter.oneByteRipOp64(OP_MOV_EvGv, 0, (RegisterID)src);
    JmpSrc label(m_formatter.size());
    spew("movq %s, " MEM_o32r "", GPRegName(src),
         ADDR_o32r(label.offset()));
    return label;
  }

  // Load effective address (64-bit).
  void leaq_mr(int32_t offset, RegisterID base, RegisterID dst) {
    spew("leaq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_LEA, offset, base, dst);
  }

  [[nodiscard]] JmpSrc leaq_rip(RegisterID dst) {
    m_formatter.oneByteRipOp64(OP_LEA, 0, dst);
    JmpSrc label(m_formatter.size());
    spew("leaq " MEM_o32r ", %s", ADDR_o32r(label.offset()),
         GPRegName(dst));
    return label;
  }

  // Flow control:

  // Indirect jump through a rip-relative memory operand.
  void jmp_rip(int ripOffset) {
    // rip-relative addressing.
    spew("jmp *%d(%%rip)", ripOffset);
    m_formatter.oneByteRipOp(OP_GROUP5_Ev, ripOffset, GROUP5_OP_JMPN);
  }

  // Emit a raw 8-byte literal into the instruction stream (.quad).
  void immediate64(int64_t imm) {
    spew(".quad %lld", (long long)imm);
    m_formatter.immediate64(imm);
  }

  // SSE operations:

  // Convert a signed 64-bit integer to double/float. Note both use the
  // OP2_CVTSI2SD_VsdEd opcode byte; the VEX_SD/VEX_SS prefix selects the
  // destination type.
  void vcvtsq2sd_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
    twoByteOpInt64Simd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, src1, src0,
                       dst);
  }
  void vcvtsq2ss_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
    twoByteOpInt64Simd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, src1, src0,
                       dst);
  }

  // Two-operand form: no explicit src0 (invalid_xmm).
  void vcvtsi2sdq_rr(RegisterID src, XMMRegisterID dst) {
    twoByteOpInt64Simd("vcvtsi2sdq", VEX_SD, OP2_CVTSI2SD_VsdEd, src,
                       invalid_xmm, dst);
  }

  // Truncating float/double -> signed 64-bit integer conversions.
  void vcvttsd2sq_rr(XMMRegisterID src, RegisterID dst) {
    twoByteOpSimdInt64("vcvttsd2si", VEX_SD, OP2_CVTTSD2SI_GdWsd, src, dst);
  }

  void vcvttss2sq_rr(XMMRegisterID src, RegisterID dst) {
    twoByteOpSimdInt64("vcvttss2si", VEX_SS, OP2_CVTTSD2SI_GdWsd, src, dst);
  }

  // XMM -> GPR 64-bit move. Operands are intentionally swapped in the call:
  // OP2_MOVD_EdVd puts the GPR in the rm slot and the XMM in the reg slot.
  void vmovq_rr(XMMRegisterID src, RegisterID dst) {
    // While this is called "vmovq", it actually uses the vmovd encoding
    // with a REX prefix modifying it to be 64-bit.
    twoByteOpSimdInt64("vmovq", VEX_PD, OP2_MOVD_EdVd, (XMMRegisterID)dst,
                       (RegisterID)src);
  }

  // Extract 64-bit lane (0 or 1) from an XMM register into a GPR.
  void vpextrq_irr(unsigned lane, XMMRegisterID src, RegisterID dst) {
    MOZ_ASSERT(lane < 2);
    threeByteOpImmSimdInt64("vpextrq", VEX_PD, OP3_PEXTRQ_EvVdqIb, ESCAPE_3A,
                            lane, src, dst);
  }

  // Insert a GPR into 64-bit lane (0 or 1) of an XMM register.
  void vpinsrq_irr(unsigned lane, RegisterID src1, XMMRegisterID src0,
                   XMMRegisterID dst) {
    MOZ_ASSERT(lane < 2);
    threeByteOpImmInt64Simd("vpinsrq", VEX_PD, OP3_PINSRQ_VdqEvIb, ESCAPE_3A,
                            lane, src1, src0, dst);
  }

  // GPR -> XMM 64-bit move.
  void vmovq_rr(RegisterID src, XMMRegisterID dst) {
    // While this is called "vmovq", it actually uses the vmovd encoding
    // with a REX prefix modifying it to be 64-bit.
    twoByteOpInt64Simd("vmovq", VEX_PD, OP2_MOVD_VdEd, src, invalid_xmm, dst);
  }

  // rip-relative SIMD loads. Each returns a JmpSrc marking the end of the
  // 4-byte displacement field (see the movl_ripr comment above).
  [[nodiscard]] JmpSrc vmovsd_ripr(XMMRegisterID dst) {
    return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, dst);
  }
  [[nodiscard]] JmpSrc vmovss_ripr(XMMRegisterID dst) {
    return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, dst);
  }
  [[nodiscard]] JmpSrc vmovaps_ripr(XMMRegisterID dst) {
    return twoByteRipOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, dst);
  }
  [[nodiscard]] JmpSrc vmovdqa_ripr(XMMRegisterID dst) {
    return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, dst);
  }

  // Packed integer add/sub with a rip-relative memory operand.
  [[nodiscard]] JmpSrc vpaddb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddq_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddq", VEX_PD, OP2_PADDQ_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, src, dst);
  }
  // More rip-relative packed-integer / packed-float wrappers. All follow the
  // same pattern: emit the op with a zero displacement, return a JmpSrc
  // marking the end of the displacement field for later patching. Ops routed
  // through threeByteRipOpSimd use the 0F 38 three-byte opcode escape.
  [[nodiscard]] JmpSrc vpsubd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubq_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubq", VEX_PD, OP2_PSUBQ_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpmullw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpmullw", VEX_PD, OP2_PMULLW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpmulld_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38,
                              src, dst);
  }

  // Saturating adds/subs.
  [[nodiscard]] JmpSrc vpaddsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddusb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddusw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubusb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubusw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, src, dst);
  }

  // Packed min/max (signed and unsigned, all element widths).
  [[nodiscard]] JmpSrc vpminsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpminsb", VEX_PD, OP3_PMINSB_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpminub_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpminub", VEX_PD, OP2_PMINUB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpminsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpminsw", VEX_PD, OP2_PMINSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpminuw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpminuw", VEX_PD, OP3_PMINUW_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpminsd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpminsd", VEX_PD, OP3_PMINSD_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpminud_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpminud", VEX_PD, OP3_PMINUD_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaxsb", VEX_PD, OP3_PMAXSB_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxub_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpmaxub", VEX_PD, OP2_PMAXUB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpmaxsw", VEX_PD, OP2_PMAXSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxuw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaxuw", VEX_PD, OP3_PMAXUW_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxsd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaxsd", VEX_PD, OP3_PMAXSD_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxud_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaxud", VEX_PD, OP3_PMAXUD_VdqWdq, ESCAPE_38,
                              src, dst);
  }

  // Bitwise logic.
  [[nodiscard]] JmpSrc vpand_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpxor_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpor_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, src, dst);
  }

  // Packed floating-point arithmetic.
  [[nodiscard]] JmpSrc vaddps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vaddpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vaddpd", VEX_PD, OP2_ADDPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vsubps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vsubpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vsubpd", VEX_PD, OP2_SUBPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vdivps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vdivpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vdivpd", VEX_PD, OP2_DIVPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vmulps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vmulpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vmulpd", VEX_PD, OP2_MULPD_VpdWpd, src, dst);
  }

  // Pack with saturation.
  [[nodiscard]] JmpSrc vpacksswb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpacksswb", VEX_PD, OP2_PACKSSWB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpackuswb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpackuswb", VEX_PD, OP2_PACKUSWB_VdqWdq, src, dst);
  }
vpackssdw_ripr(XMMRegisterID src,XMMRegisterID dst)1054 [[nodiscard]] JmpSrc vpackssdw_ripr(XMMRegisterID src, XMMRegisterID dst) { 1055 return twoByteRipOpSimd("vpackssdw", VEX_PD, OP2_PACKSSDW_VdqWdq, src, dst); 1056 } vpackusdw_ripr(XMMRegisterID src,XMMRegisterID dst)1057 [[nodiscard]] JmpSrc vpackusdw_ripr(XMMRegisterID src, XMMRegisterID dst) { 1058 return threeByteRipOpSimd("vpackusdw", VEX_PD, OP3_PACKUSDW_VdqWdq, 1059 ESCAPE_38, src, dst); 1060 } vptest_ripr(XMMRegisterID lhs)1061 [[nodiscard]] JmpSrc vptest_ripr(XMMRegisterID lhs) { 1062 return threeByteRipOpSimd("vptest", VEX_PD, OP3_PTEST_VdVd, ESCAPE_38, lhs); 1063 } vpshufb_ripr(XMMRegisterID src,XMMRegisterID dst)1064 [[nodiscard]] JmpSrc vpshufb_ripr(XMMRegisterID src, XMMRegisterID dst) { 1065 return threeByteRipOpSimd("vpshufb", VEX_PD, OP3_PSHUFB_VdqWdq, ESCAPE_38, 1066 src, dst); 1067 } vpmaddwd_ripr(XMMRegisterID src,XMMRegisterID dst)1068 [[nodiscard]] JmpSrc vpmaddwd_ripr(XMMRegisterID src, XMMRegisterID dst) { 1069 return twoByteRipOpSimd("vpmaddwd", VEX_PD, OP2_PMADDWD_VdqWdq, src, dst); 1070 } vpcmpeqb_ripr(XMMRegisterID src,XMMRegisterID dst)1071 [[nodiscard]] JmpSrc vpcmpeqb_ripr(XMMRegisterID src, XMMRegisterID dst) { 1072 return twoByteRipOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, src, dst); 1073 } vpcmpgtb_ripr(XMMRegisterID src,XMMRegisterID dst)1074 [[nodiscard]] JmpSrc vpcmpgtb_ripr(XMMRegisterID src, XMMRegisterID dst) { 1075 return twoByteRipOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, src, dst); 1076 } vpcmpeqw_ripr(XMMRegisterID src,XMMRegisterID dst)1077 [[nodiscard]] JmpSrc vpcmpeqw_ripr(XMMRegisterID src, XMMRegisterID dst) { 1078 return twoByteRipOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, src, dst); 1079 } vpcmpgtw_ripr(XMMRegisterID src,XMMRegisterID dst)1080 [[nodiscard]] JmpSrc vpcmpgtw_ripr(XMMRegisterID src, XMMRegisterID dst) { 1081 return twoByteRipOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, src, dst); 1082 } vpcmpeqd_ripr(XMMRegisterID src,XMMRegisterID 
dst)1083 [[nodiscard]] JmpSrc vpcmpeqd_ripr(XMMRegisterID src, XMMRegisterID dst) { 1084 return twoByteRipOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, src, dst); 1085 } vpcmpgtd_ripr(XMMRegisterID src,XMMRegisterID dst)1086 [[nodiscard]] JmpSrc vpcmpgtd_ripr(XMMRegisterID src, XMMRegisterID dst) { 1087 return twoByteRipOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, src, dst); 1088 } vcmpeqps_ripr(XMMRegisterID src,XMMRegisterID dst)1089 [[nodiscard]] JmpSrc vcmpeqps_ripr(XMMRegisterID src, XMMRegisterID dst) { 1090 return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, 1091 X86Encoding::ConditionCmp_EQ, src, dst); 1092 } vcmpneqps_ripr(XMMRegisterID src,XMMRegisterID dst)1093 [[nodiscard]] JmpSrc vcmpneqps_ripr(XMMRegisterID src, XMMRegisterID dst) { 1094 return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, 1095 X86Encoding::ConditionCmp_NEQ, src, dst); 1096 } vcmpltps_ripr(XMMRegisterID src,XMMRegisterID dst)1097 [[nodiscard]] JmpSrc vcmpltps_ripr(XMMRegisterID src, XMMRegisterID dst) { 1098 return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, 1099 X86Encoding::ConditionCmp_LT, src, dst); 1100 } vcmpleps_ripr(XMMRegisterID src,XMMRegisterID dst)1101 [[nodiscard]] JmpSrc vcmpleps_ripr(XMMRegisterID src, XMMRegisterID dst) { 1102 return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps, 1103 X86Encoding::ConditionCmp_LE, src, dst); 1104 } vcmpeqpd_ripr(XMMRegisterID src,XMMRegisterID dst)1105 [[nodiscard]] JmpSrc vcmpeqpd_ripr(XMMRegisterID src, XMMRegisterID dst) { 1106 return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd, 1107 X86Encoding::ConditionCmp_EQ, src, dst); 1108 } vcmpneqpd_ripr(XMMRegisterID src,XMMRegisterID dst)1109 [[nodiscard]] JmpSrc vcmpneqpd_ripr(XMMRegisterID src, XMMRegisterID dst) { 1110 return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd, 1111 X86Encoding::ConditionCmp_NEQ, src, dst); 1112 } vcmpltpd_ripr(XMMRegisterID src,XMMRegisterID dst)1113 [[nodiscard]] JmpSrc 
vcmpltpd_ripr(XMMRegisterID src, XMMRegisterID dst) { 1114 return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd, 1115 X86Encoding::ConditionCmp_LT, src, dst); 1116 } vcmplepd_ripr(XMMRegisterID src,XMMRegisterID dst)1117 [[nodiscard]] JmpSrc vcmplepd_ripr(XMMRegisterID src, XMMRegisterID dst) { 1118 return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd, 1119 X86Encoding::ConditionCmp_LE, src, dst); 1120 } vpmaddubsw_ripr(XMMRegisterID src,XMMRegisterID dst)1121 [[nodiscard]] JmpSrc vpmaddubsw_ripr(XMMRegisterID src, XMMRegisterID dst) { 1122 return threeByteRipOpSimd("vpmaddubsw", VEX_PD, OP3_PMADDUBSW_VdqWdq, 1123 ESCAPE_38, src, dst); 1124 } 1125 1126 // BMI instructions: 1127 sarxq_rrr(RegisterID src,RegisterID shift,RegisterID dst)1128 void sarxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) { 1129 spew("sarxq %s, %s, %s", GPReg64Name(src), GPReg64Name(shift), 1130 GPReg64Name(dst)); 1131 1132 RegisterID rm = src; 1133 XMMRegisterID src0 = static_cast<XMMRegisterID>(shift); 1134 int reg = dst; 1135 m_formatter.threeByteOpVex64(VEX_SS /* = F3 */, OP3_SARX_GyEyBy, ESCAPE_38, 1136 rm, src0, reg); 1137 } 1138 shlxq_rrr(RegisterID src,RegisterID shift,RegisterID dst)1139 void shlxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) { 1140 spew("shlxq %s, %s, %s", GPReg64Name(src), GPReg64Name(shift), 1141 GPReg64Name(dst)); 1142 1143 RegisterID rm = src; 1144 XMMRegisterID src0 = static_cast<XMMRegisterID>(shift); 1145 int reg = dst; 1146 m_formatter.threeByteOpVex64(VEX_PD /* = 66 */, OP3_SHLX_GyEyBy, ESCAPE_38, 1147 rm, src0, reg); 1148 } 1149 shrxq_rrr(RegisterID src,RegisterID shift,RegisterID dst)1150 void shrxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) { 1151 spew("shrxq %s, %s, %s", GPReg64Name(src), GPReg64Name(shift), 1152 GPReg64Name(dst)); 1153 1154 RegisterID rm = src; 1155 XMMRegisterID src0 = static_cast<XMMRegisterID>(shift); 1156 int reg = dst; 1157 m_formatter.threeByteOpVex64(VEX_SD /* = F2 */, 
OP3_SHRX_GyEyBy, ESCAPE_38, 1158 rm, src0, reg); 1159 } 1160 1161 private: twoByteRipOpSimd(const char * name,VexOperandType ty,TwoByteOpcodeID opcode,XMMRegisterID reg)1162 [[nodiscard]] JmpSrc twoByteRipOpSimd(const char* name, VexOperandType ty, 1163 TwoByteOpcodeID opcode, 1164 XMMRegisterID reg) { 1165 MOZ_ASSERT(!IsXMMReversedOperands(opcode)); 1166 m_formatter.legacySSEPrefix(ty); 1167 m_formatter.twoByteRipOp(opcode, 0, reg); 1168 JmpSrc label(m_formatter.size()); 1169 spew("%-11s " MEM_o32r ", %s", legacySSEOpName(name), 1170 ADDR_o32r(label.offset()), XMMRegName(reg)); 1171 return label; 1172 } 1173 twoByteRipOpSimd(const char * name,VexOperandType ty,TwoByteOpcodeID opcode,XMMRegisterID src0,XMMRegisterID dst)1174 [[nodiscard]] JmpSrc twoByteRipOpSimd(const char* name, VexOperandType ty, 1175 TwoByteOpcodeID opcode, 1176 XMMRegisterID src0, XMMRegisterID dst) { 1177 MOZ_ASSERT(src0 != invalid_xmm && !IsXMMReversedOperands(opcode)); 1178 if (useLegacySSEEncoding(src0, dst)) { 1179 m_formatter.legacySSEPrefix(ty); 1180 m_formatter.twoByteRipOp(opcode, 0, dst); 1181 JmpSrc label(m_formatter.size()); 1182 spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name), 1183 ADDR_o32r(label.offset()), XMMRegName(dst)); 1184 return label; 1185 } 1186 1187 m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst); 1188 JmpSrc label(m_formatter.size()); 1189 spew("%-11s, " MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()), 1190 XMMRegName(src0), XMMRegName(dst)); 1191 return label; 1192 } 1193 twoByteRipOpImmSimd(const char * name,VexOperandType ty,TwoByteOpcodeID opcode,uint32_t imm,XMMRegisterID src0,XMMRegisterID dst)1194 [[nodiscard]] JmpSrc twoByteRipOpImmSimd(const char* name, VexOperandType ty, 1195 TwoByteOpcodeID opcode, uint32_t imm, 1196 XMMRegisterID src0, 1197 XMMRegisterID dst) { 1198 MOZ_ASSERT(src0 != invalid_xmm && !IsXMMReversedOperands(opcode)); 1199 if (useLegacySSEEncoding(src0, dst)) { 1200 m_formatter.legacySSEPrefix(ty); 1201 
m_formatter.twoByteRipOp(opcode, 0, dst); 1202 m_formatter.immediate8u(imm); 1203 JmpSrc label(m_formatter.size(), 1204 /* bytes trailing the patch field = */ 1); 1205 spew("%-11s$0x%x, " MEM_o32r ", %s", legacySSEOpName(name), imm, 1206 ADDR_o32r(label.offset()), XMMRegName(dst)); 1207 return label; 1208 } 1209 1210 m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst); 1211 m_formatter.immediate8u(imm); 1212 JmpSrc label(m_formatter.size(), 1213 /* bytes trailing the patch field = */ 1); 1214 spew("%-11s$0x%x, " MEM_o32r ", %s, %s", name, imm, 1215 ADDR_o32r(label.offset()), XMMRegName(src0), XMMRegName(dst)); 1216 return label; 1217 } 1218 twoByteOpInt64Simd(const char * name,VexOperandType ty,TwoByteOpcodeID opcode,RegisterID rm,XMMRegisterID src0,XMMRegisterID dst)1219 void twoByteOpInt64Simd(const char* name, VexOperandType ty, 1220 TwoByteOpcodeID opcode, RegisterID rm, 1221 XMMRegisterID src0, XMMRegisterID dst) { 1222 if (useLegacySSEEncoding(src0, dst)) { 1223 if (IsXMMReversedOperands(opcode)) { 1224 spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst), 1225 GPRegName(rm)); 1226 } else { 1227 spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(rm), 1228 XMMRegName(dst)); 1229 } 1230 m_formatter.legacySSEPrefix(ty); 1231 m_formatter.twoByteOp64(opcode, rm, dst); 1232 return; 1233 } 1234 1235 if (src0 == invalid_xmm) { 1236 if (IsXMMReversedOperands(opcode)) { 1237 spew("%-11s%s, %s", name, XMMRegName(dst), GPRegName(rm)); 1238 } else { 1239 spew("%-11s%s, %s", name, GPRegName(rm), XMMRegName(dst)); 1240 } 1241 } else { 1242 spew("%-11s%s, %s, %s", name, GPRegName(rm), XMMRegName(src0), 1243 XMMRegName(dst)); 1244 } 1245 m_formatter.twoByteOpVex64(ty, opcode, rm, src0, dst); 1246 } 1247 twoByteOpSimdInt64(const char * name,VexOperandType ty,TwoByteOpcodeID opcode,XMMRegisterID rm,RegisterID dst)1248 void twoByteOpSimdInt64(const char* name, VexOperandType ty, 1249 TwoByteOpcodeID opcode, XMMRegisterID rm, 1250 RegisterID dst) { 1251 if 
(useLegacySSEEncodingAlways()) { 1252 if (IsXMMReversedOperands(opcode)) { 1253 spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(dst), 1254 XMMRegName(rm)); 1255 } else if (opcode == OP2_MOVD_EdVd) { 1256 spew("%-11s%s, %s", legacySSEOpName(name), 1257 XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm)); 1258 } else { 1259 spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm), 1260 GPRegName(dst)); 1261 } 1262 m_formatter.legacySSEPrefix(ty); 1263 m_formatter.twoByteOp64(opcode, (RegisterID)rm, dst); 1264 return; 1265 } 1266 1267 if (IsXMMReversedOperands(opcode)) { 1268 spew("%-11s%s, %s", name, GPRegName(dst), XMMRegName(rm)); 1269 } else if (opcode == OP2_MOVD_EdVd) { 1270 spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst), 1271 GPRegName((RegisterID)rm)); 1272 } else { 1273 spew("%-11s%s, %s", name, XMMRegName(rm), GPRegName(dst)); 1274 } 1275 m_formatter.twoByteOpVex64(ty, opcode, (RegisterID)rm, invalid_xmm, 1276 (XMMRegisterID)dst); 1277 } 1278 threeByteRipOpSimd(const char * name,VexOperandType ty,ThreeByteOpcodeID opcode,ThreeByteEscape escape,XMMRegisterID dst)1279 [[nodiscard]] JmpSrc threeByteRipOpSimd(const char* name, VexOperandType ty, 1280 ThreeByteOpcodeID opcode, 1281 ThreeByteEscape escape, 1282 XMMRegisterID dst) { 1283 m_formatter.legacySSEPrefix(ty); 1284 m_formatter.threeByteRipOp(opcode, escape, 0, dst); 1285 JmpSrc label(m_formatter.size()); 1286 spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name), 1287 ADDR_o32r(label.offset()), XMMRegName(dst)); 1288 return label; 1289 } 1290 threeByteRipOpSimd(const char * name,VexOperandType ty,ThreeByteOpcodeID opcode,ThreeByteEscape escape,XMMRegisterID src0,XMMRegisterID dst)1291 [[nodiscard]] JmpSrc threeByteRipOpSimd(const char* name, VexOperandType ty, 1292 ThreeByteOpcodeID opcode, 1293 ThreeByteEscape escape, 1294 XMMRegisterID src0, 1295 XMMRegisterID dst) { 1296 MOZ_ASSERT(src0 != invalid_xmm); 1297 if (useLegacySSEEncoding(src0, dst)) { 1298 
m_formatter.legacySSEPrefix(ty); 1299 m_formatter.threeByteRipOp(opcode, escape, 0, dst); 1300 JmpSrc label(m_formatter.size()); 1301 spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name), 1302 ADDR_o32r(label.offset()), XMMRegName(dst)); 1303 return label; 1304 } 1305 1306 m_formatter.threeByteRipOpVex(ty, opcode, escape, 0, src0, dst); 1307 JmpSrc label(m_formatter.size()); 1308 spew("%-11s" MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()), 1309 XMMRegName(src0), XMMRegName(dst)); 1310 return label; 1311 } 1312 threeByteOpImmSimdInt64(const char * name,VexOperandType ty,ThreeByteOpcodeID opcode,ThreeByteEscape escape,uint32_t imm,XMMRegisterID src,RegisterID dst)1313 void threeByteOpImmSimdInt64(const char* name, VexOperandType ty, 1314 ThreeByteOpcodeID opcode, ThreeByteEscape escape, 1315 uint32_t imm, XMMRegisterID src, 1316 RegisterID dst) { 1317 spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg64Name(dst), 1318 XMMRegName(src)); 1319 m_formatter.legacySSEPrefix(ty); 1320 m_formatter.threeByteOp64(opcode, escape, dst, (RegisterID)src); 1321 m_formatter.immediate8u(imm); 1322 } 1323 threeByteOpImmInt64Simd(const char * name,VexOperandType ty,ThreeByteOpcodeID opcode,ThreeByteEscape escape,uint32_t imm,RegisterID src1,XMMRegisterID src0,XMMRegisterID dst)1324 void threeByteOpImmInt64Simd(const char* name, VexOperandType ty, 1325 ThreeByteOpcodeID opcode, ThreeByteEscape escape, 1326 uint32_t imm, RegisterID src1, 1327 XMMRegisterID src0, XMMRegisterID dst) { 1328 if (useLegacySSEEncoding(src0, dst)) { 1329 spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg64Name(src1), 1330 XMMRegName(dst)); 1331 m_formatter.legacySSEPrefix(ty); 1332 m_formatter.threeByteOp64(opcode, escape, src1, (RegisterID)dst); 1333 m_formatter.immediate8u(imm); 1334 return; 1335 } 1336 1337 MOZ_ASSERT(src0 != invalid_xmm); 1338 spew("%-11s$0x%x, %s, %s, %s", name, imm, GPReg64Name(src1), 1339 XMMRegName(src0), XMMRegName(dst)); 1340 m_formatter.threeByteOpVex64(ty, 
opcode, escape, src1, src0, 1341 (RegisterID)dst); 1342 m_formatter.immediate8u(imm); 1343 } 1344 }; 1345 1346 using BaseAssemblerSpecific = BaseAssemblerX64; 1347 1348 } // namespace X86Encoding 1349 1350 } // namespace jit 1351 } // namespace js 1352 1353 #endif /* jit_x64_BaseAssembler_x64_h */ 1354