1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
28
29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
30
31 #include "AssemblerBuffer.h"
32 #include <stdint.h>
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
35
36 namespace JSC {
37
// Returns true if |value| survives a round-trip through an 8-bit signed
// truncation and sign-extension back to 32 bits, i.e. it can be encoded as
// a one-byte immediate (imm8). Callers use this to select the shorter
// sign-extended imm8 instruction forms over the imm32 forms.
// (Modernized: named static_casts instead of a C-style cast.)
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == static_cast<int32_t>(static_cast<int8_t>(value)); }
39
namespace X86Registers {
    // General-purpose registers. Enumerator order gives each register its
    // hardware encoding number (eax = 0 ... edi = 7; r8-r15 = 8-15 on
    // x86-64), so the values must not be reordered.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if CPU(X86_64)
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers, likewise numbered in hardware-encoding order.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
74
75 class X86Assembler {
76 public:
77 typedef X86Registers::RegisterID RegisterID;
78 typedef X86Registers::XMMRegisterID XMMRegisterID;
79 typedef XMMRegisterID FPRegisterID;
80
    // x86 condition codes, listed in opcode-encoding order: the enumerator
    // value is added to the Jcc / SETcc opcode bases (see jccRel32 and
    // setccOpcode below), so the ordering must not change.
    typedef enum {
        ConditionO,   // overflow
        ConditionNO,  // no overflow
        ConditionB,   // below (unsigned <)
        ConditionAE,  // above or equal (unsigned >=)
        ConditionE,   // equal / zero
        ConditionNE,  // not equal / not zero
        ConditionBE,  // below or equal (unsigned <=)
        ConditionA,   // above (unsigned >)
        ConditionS,   // sign set
        ConditionNS,  // sign clear
        ConditionP,   // parity even
        ConditionNP,  // parity odd
        ConditionL,   // less (signed <)
        ConditionGE,  // greater or equal (signed >=)
        ConditionLE,  // less or equal (signed <=)
        ConditionG,   // greater (signed >)

        ConditionC = ConditionB,    // carry set is the same encoding as "below"
        ConditionNC = ConditionAE,  // carry clear is the same encoding as "above or equal"
    } Condition;
102
103 private:
    // Single-byte x86 opcodes and legacy prefixes emitted by this assembler.
    // Suffixes follow the Intel manual's operand notation: Ev/Gv = r/m and
    // register operands of operand size; Eb/Gb = their 8-bit forms;
    // Iz/Ib = 32-bit/8-bit immediate; Ov = absolute (moffs) operand.
    // "GROUPn" opcodes take a sub-opcode in the ModRM reg field
    // (see GroupOpcodeID below). Byte values are fixed by the ISA.
    typedef enum {
        OP_ADD_EvGv                     = 0x01,
        OP_ADD_GvEv                     = 0x03,
        OP_OR_EvGv                      = 0x09,
        OP_OR_GvEv                      = 0x0B,
        OP_2BYTE_ESCAPE                 = 0x0F, // escape byte preceding all TwoByteOpcodeID opcodes
        OP_AND_EvGv                     = 0x21,
        OP_AND_GvEv                     = 0x23,
        OP_SUB_EvGv                     = 0x29,
        OP_SUB_GvEv                     = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E, // branch-hint prefix
        OP_XOR_EvGv                     = 0x31,
        OP_XOR_GvEv                     = 0x33,
        OP_CMP_EvGv                     = 0x39,
        OP_CMP_GvEv                     = 0x3B,
#if CPU(X86_64)
        PRE_REX                         = 0x40, // REX prefix base (x86-64 only)
#endif
        OP_PUSH_EAX                     = 0x50, // base of the push-reg opcode range
        OP_POP_EAX                      = 0x58, // base of the pop-reg opcode range
#if CPU(X86_64)
        OP_MOVSXD_GvEv                  = 0x63,
#endif
        PRE_OPERAND_SIZE                = 0x66, // 16-bit operand-size override prefix
        PRE_SSE_66                      = 0x66, // same byte, used as an SSE mandatory prefix
        OP_PUSH_Iz                      = 0x68,
        OP_IMUL_GvEvIz                  = 0x69,
        OP_GROUP1_EbIb                  = 0x80,
        OP_GROUP1_EvIz                  = 0x81,
        OP_GROUP1_EvIb                  = 0x83,
        OP_TEST_EbGb                    = 0x84,
        OP_TEST_EvGv                    = 0x85,
        OP_XCHG_EvGv                    = 0x87,
        OP_MOV_EvGv                     = 0x89,
        OP_MOV_GvEv                     = 0x8B,
        OP_LEA                          = 0x8D,
        OP_GROUP1A_Ev                   = 0x8F,
        OP_CDQ                          = 0x99,
        OP_MOV_EAXOv                    = 0xA1,
        OP_MOV_OvEAX                    = 0xA3,
        OP_MOV_EAXIv                    = 0xB8, // base of the mov-imm-to-reg opcode range
        OP_GROUP2_EvIb                  = 0xC1,
        OP_RET                          = 0xC3,
        OP_GROUP11_EvIz                 = 0xC7,
        OP_INT3                         = 0xCC,
        OP_GROUP2_Ev1                   = 0xD1, // shift/rotate by constant 1
        OP_GROUP2_EvCL                  = 0xD3, // shift/rotate by CL
        OP_CALL_rel32                   = 0xE8,
        OP_JMP_rel32                    = 0xE9,
        PRE_SSE_F2                      = 0xF2, // SSE scalar-double mandatory prefix
        OP_HLT                          = 0xF4,
        OP_GROUP3_EbIb                  = 0xF6,
        OP_GROUP3_Ev                    = 0xF7,
        OP_GROUP3_EvIz                  = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP_GROUP5_Ev                    = 0xFF,
    } OneByteOpcodeID;
160
    // Opcodes in the two-byte map, i.e. the byte following the 0x0F escape
    // (OP_2BYTE_ESCAPE). Mostly SSE scalar-double operations plus long-form
    // conditional branches/sets. Byte values are fixed by the ISA.
    typedef enum {
        OP2_MOVSD_VsdWsd    = 0x10,
        OP2_MOVSD_WsdVsd    = 0x11,
        OP2_CVTSI2SD_VsdEd  = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd  = 0x2E,
        OP2_ADDSD_VsdWsd    = 0x58,
        OP2_MULSD_VsdWsd    = 0x59,
        OP2_SUBSD_VsdWsd    = 0x5C,
        OP2_DIVSD_VsdWsd    = 0x5E,
        OP2_SQRTSD_VsdWsd   = 0x51,
        OP2_XORPD_VpdWpd    = 0x57,
        OP2_MOVD_VdEd       = 0x6E,
        OP2_MOVD_EdVd       = 0x7E,
        OP2_JCC_rel32       = 0x80, // base; the Condition value is added (see jccRel32)
        OP_SETCC            = 0x90, // base; the Condition value is added (see setccOpcode)
        OP2_IMUL_GvEv       = 0xAF,
        OP2_MOVZX_GvEb      = 0xB6,
        OP2_MOVZX_GvEw      = 0xB7,
        OP2_PEXTRW_GdUdIb   = 0xC5,
    } TwoByteOpcodeID;
182
jccRel32(Condition cond)183 TwoByteOpcodeID jccRel32(Condition cond)
184 {
185 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
186 }
187
setccOpcode(Condition cond)188 TwoByteOpcodeID setccOpcode(Condition cond)
189 {
190 return (TwoByteOpcodeID)(OP_SETCC + cond);
191 }
192
    // Sub-opcodes carried in the ModRM reg field for the "group" opcodes
    // above; each selects the actual operation of an OP_GROUPn_* instruction.
    // Values are fixed by the ISA.
    typedef enum {
        // Sub-opcodes of OP_GROUP1_* (arithmetic with immediate).
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR  = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        // Sub-opcode of OP_GROUP1A_Ev.
        GROUP1A_OP_POP = 0,

        // Sub-opcodes of OP_GROUP2_* (shifts).
        GROUP2_OP_SHL = 4,
        GROUP2_OP_SHR = 5,
        GROUP2_OP_SAR = 7,

        // Sub-opcodes of OP_GROUP3_* (unary ops / test).
        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT  = 2,
        GROUP3_OP_NEG  = 3,
        GROUP3_OP_IDIV = 7,

        // Sub-opcodes of OP_GROUP5_Ev (indirect call/jump, push).
        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN  = 4,
        GROUP5_OP_PUSH  = 6,

        // Sub-opcode of OP_GROUP11_EvIz (mov immediate to r/m).
        GROUP11_MOV = 0,
    } GroupOpcodeID;
219
220 class X86InstructionFormatter;
221 public:
222
    // Nothing to initialize explicitly; the formatter (and its buffer) are
    // default-constructed.
    X86Assembler()
    {
    }
226
227 // Stack operations:
228
    // Emitter naming convention used throughout: operand suffixes are
    // _r register, _m memory ([base + offset], optionally with index/scale),
    // _i immediate; size letters are b = 8-bit, w = 16-bit, l = 32-bit,
    // q = 64-bit. Source operand(s) precede the destination.

    // push reg.
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    // pop reg.
    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    // push a 32-bit immediate.
    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    // push the value at [base + offset].
    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    // pop into [base + offset].
    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
254
255 // Arithmetic operations:
256
#if !CPU(X86_64)
    // add-with-carry a 32-bit immediate into an absolute address (x86-32
    // only; on x86-64 absolute addresses can't be ModRM-encoded this way).
    // Uses the shorter sign-extended imm8 encoding when the value fits.
    void adcl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
269
    // 32-bit add, register/memory/immediate forms. All immediate forms pick
    // the sign-extended imm8 encoding when the value fits, else imm32.
    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    void addl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
    }

    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    // 64-bit add variants (emitted through oneByteOp64).
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit add to an absolute address (x86-32 only).
    void addl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
346
    // Bitwise AND, 32-bit (and 64-bit "andq" on x86-64). Immediate forms
    // pick the sign-extended imm8 encoding when the value fits, else imm32.
    void andl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    void andl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
    }

    void andl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
    }

    void andl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    void andq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // AND into an absolute address (x86-32 only).
    void andl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
412
    // Two's-complement negate, 32-bit, register or memory operand.
    void negl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

    void negl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
    }

    // Bitwise NOT, 32-bit, register or memory operand.
    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    void notl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
    }
432
    // Bitwise OR, 32-bit (and 64-bit "orq" on x86-64). Immediate forms pick
    // the sign-extended imm8 encoding when the value fits, else imm32.
    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    void orl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // OR into an absolute address (x86-32 only).
    void orl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
498
    // Subtract, 32-bit (and 64-bit "subq" on x86-64). Immediate forms pick
    // the sign-extended imm8 encoding when the value fits, else imm32.
    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    void subl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // Subtract from an absolute address (x86-32 only).
    void subl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
564
    // Bitwise XOR, 32-bit (and 64-bit "xorq" on x86-64). Immediate forms
    // pick the sign-extended imm8 encoding when the value fits, else imm32.
    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    void xorl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
    }

    void xorl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
    }

    void xorl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

#if CPU(X86_64)
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif
619
    // Shifts. The _i8r forms take an immediate count and use the dedicated
    // shift-by-1 opcode when imm == 1 (one byte shorter); the _CLr forms
    // shift by the count in the CL register.

    // Arithmetic (sign-preserving) shift right, 32-bit.
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    // Logical (zero-filling) shift right, 32-bit.
    void shrl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shrl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
    }

    // Shift left, 32-bit.
    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

#if CPU(X86_64)
    // 64-bit arithmetic shift right.
    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
#endif
681
    // Signed multiply, 32-bit (two-byte-map 0x0F 0xAF forms).
    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    void imull_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
    }

    // dst = src * value (three-operand imul with imm32).
    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    // Signed divide of edx:eax by the operand; quotient -> eax, remainder -> edx.
    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
702
703 // Comparisons:
704
    // 32-bit compare (sets flags only). Immediate forms pick the
    // sign-extended imm8 encoding when the value fits, else imm32; the
    // _force32 variants always emit the 4-byte immediate even when imm8
    // would fit, producing a fixed-size instruction.
    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    // 8-bit compares against memory.
    void cmpb_im(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate8(imm);
    }

    void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    // 32-bit compare against [base + index*scale + offset].
    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }
776
#if CPU(X86_64)
    // 64-bit compares (REX.W-prefixed via oneByteOp64). Immediate forms pick
    // the sign-extended imm8 encoding when the value fits, else imm32.
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit compares against an absolute address (x86-32 only).
    void cmpl_rm(RegisterID reg, const void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
842
    // 16-bit compares: the 0x66 operand-size prefix turns the 32-bit opcodes
    // into their 16-bit forms.
    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }

    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            // With the operand-size prefix the "Iz" immediate is 16 bits.
            m_formatter.immediate16(imm);
        }
    }
861
    // test: bitwise AND that only sets flags. The immediate forms always use
    // a full 32-bit immediate (GROUP3 test has no imm8 variant).
    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    // 8-bit test forms.
    void testb_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EbGb, src, dst);
    }

    void testb_im(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate8(imm);
    }

    void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
901
#if CPU(X86_64)
    // 64-bit test forms; the 32-bit immediate is sign-extended by the CPU.
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
#endif
926
    // 16-bit test (0x66 prefix selects the 16-bit form).
    void testw_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    // 8-bit test against a register (oneByteOp8 handles byte-register encoding).
    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }

    // SETcc: store 1 into the 8-bit destination register if the condition
    // holds, else 0. The reg field of the ModRM byte is unused (0).
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    // setz is the same instruction as sete.
    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    // setnz is the same instruction as setne.
    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }
963
964 // Various move ops:
965
    // Sign-extend eax into edx:eax (used before idiv).
    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }

    // Exchange two registers, 32-bit / 64-bit.
    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

#if CPU(X86_64)
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
#endif

    // 32-bit moves, register and register-to-memory forms.
    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    // Always emits a 32-bit displacement, so the offset can be repatched
    // regardless of its initial value.
    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }
1002
movl_mEAX(const void * addr)1003 void movl_mEAX(const void* addr)
1004 {
1005 m_formatter.oneByteOp(OP_MOV_EAXOv);
1006 #if CPU(X86_64)
1007 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1008 #else
1009 m_formatter.immediate32(reinterpret_cast<int>(addr));
1010 #endif
1011 }
1012
movl_mr(int offset,RegisterID base,RegisterID dst)1013 void movl_mr(int offset, RegisterID base, RegisterID dst)
1014 {
1015 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
1016 }
1017
movl_mr_disp32(int offset,RegisterID base,RegisterID dst)1018 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
1019 {
1020 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
1021 }
1022
movl_mr(int offset,RegisterID base,RegisterID index,int scale,RegisterID dst)1023 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1024 {
1025 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
1026 }
1027
    // movl $imm, dst — load a 32-bit immediate into a register. The low three
    // bits of dst are folded into the opcode byte (B8+r form).
    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }

    // movl $imm, offset(base) — store a 32-bit immediate to memory.
    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }
1039
    // Store eax to an absolute address, using the compact EAX-specific
    // opcode; the address is emitted as a pointer-width immediate.
    void movl_EAXm(const void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
1049
1050 #if CPU(X86_64)
    // movq src, dst — 64-bit register-to-register move (REX.W planted by
    // the oneByteOp64 formatter).
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    // movq src, offset(base) — store a 64-bit register to memory.
    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    // As movq_rm, but always encodes a 32-bit displacement (patchable).
    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    // movq src, offset(base, index, 1<<scale) — store with a SIB-form address.
    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }
1070
    // Load rax from an absolute 64-bit address (EAX-specific opcode + REX.W).
    void movq_mEAX(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    // Store rax to an absolute 64-bit address (EAX-specific opcode + REX.W).
    void movq_EAXm(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }
1082
    // movq offset(base), dst — load 64 bits from memory into a register.
    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    // As movq_mr, but always encodes a 32-bit displacement (patchable).
    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    // movq offset(base, index, 1<<scale), dst — load with a SIB-form address.
    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }
1097
    // movq $imm, offset(base) — store a sign-extended 32-bit immediate to a
    // 64-bit memory location (the Iz immediate form is at most 32 bits).
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // movabsq $imm, dst — load a full 64-bit immediate into a register
    // (B8+r form with REX.W takes an 8-byte immediate).
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }
1109
    // movsxd src, dst — sign-extend a 32-bit register into a 64-bit register.
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }
1114
1115
1116 #else
    // 32-bit x86 only: store a register to an absolute address, preferring
    // the shorter EAX-specific encoding when the source is eax.
    void movl_rm(RegisterID src, const void* addr)
    {
        if (src == X86Registers::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    // 32-bit x86 only: load a register from an absolute address, preferring
    // the shorter EAX-specific encoding when the destination is eax.
    void movl_mr(const void* addr, RegisterID dst)
    {
        if (dst == X86Registers::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    // 32-bit x86 only: store a 32-bit immediate to an absolute address.
    void movl_i32m(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
1138 #endif
1139
    // movzwl offset(base), dst — zero-extending 16-bit load.
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    // movzwl offset(base, index, 1<<scale), dst — zero-extending 16-bit load,
    // SIB-form address.
    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    // movzbl src, dst — zero-extend the low byte of src into dst.
    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }
1157
    // leal offset(base), dst — compute base+offset into dst (32-bit).
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
1162 #if CPU(X86_64)
    // leaq offset(base), dst — compute base+offset into dst (64-bit).
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
1167 #endif
1168
1169 // Flow control:
1170
    // Emit a rel32 call with a zero placeholder displacement; the returned
    // label addresses the end of the immediate (i.e. the return address),
    // for later patching via linkCall/relinkCall.
    AssemblerLabel call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect call through a register; the returned label is the point just
    // after the instruction (the return address).
    AssemblerLabel call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return m_formatter.label();
    }

    // Indirect call through memory at base+offset.
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }
1187
    // Emit a rel32 unconditional jump with a zero placeholder displacement;
    // the returned label marks the end of the immediate for later linking.
    AssemblerLabel jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return an AssemblerLabel so we have a label to the jump, so we can use this
    // To make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    AssemblerLabel jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return m_formatter.label();
    }

    // Indirect jump through memory at base+offset.
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
1207
    // Conditional branch emitters. Each plants a two-byte Jcc opcode followed
    // by a 4-byte zero placeholder rel32, and returns a label addressing the
    // end of the immediate, to be patched later via linkJump/setRel32.
    AssemblerLabel jne()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNE));
        return m_formatter.immediateRel32();
    }

    // jnz is a synonym for jne (same condition code).
    AssemblerLabel jnz()
    {
        return jne();
    }

    AssemblerLabel je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
    }

    // jz is a synonym for je (same condition code).
    AssemblerLabel jz()
    {
        return je();
    }

    AssemblerLabel jl()
    {
        m_formatter.twoByteOp(jccRel32(ConditionL));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jb()
    {
        m_formatter.twoByteOp(jccRel32(ConditionB));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jle()
    {
        m_formatter.twoByteOp(jccRel32(ConditionLE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jbe()
    {
        m_formatter.twoByteOp(jccRel32(ConditionBE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jge()
    {
        m_formatter.twoByteOp(jccRel32(ConditionGE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jg()
    {
        m_formatter.twoByteOp(jccRel32(ConditionG));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel ja()
    {
        m_formatter.twoByteOp(jccRel32(ConditionA));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jae()
    {
        m_formatter.twoByteOp(jccRel32(ConditionAE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jo()
    {
        m_formatter.twoByteOp(jccRel32(ConditionO));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionP));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel js()
    {
        m_formatter.twoByteOp(jccRel32(ConditionS));
        return m_formatter.immediateRel32();
    }

    // Generic form: branch on an arbitrary Condition value.
    AssemblerLabel jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
1301
1302 // SSE operations:
1303
    // addsd src, dst — scalar double add (F2 prefix selects the SD form).
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // addsd offset(base), dst — scalar double add from memory.
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // cvtsi2sd src, dst — convert a 32-bit integer register to double.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    // cvtsi2sd offset(base), dst — convert a 32-bit integer in memory to double.
    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }
1327
1328 #if !CPU(X86_64)
    // 32-bit x86 only: cvtsi2sd from an absolute address.
    void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }
1334 #endif
1335
    // cvttsd2si src, dst — truncating double-to-int conversion.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    // movd src, dst — move the low 32 bits of an XMM register to a GPR
    // (66 prefix selects the SSE2 form).
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }
1347
1348 #if CPU(X86_64)
    // movq src, dst — move 64 bits from an XMM register to a GPR
    // (MOVD opcode widened to a 64-bit move by REX.W).
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    // movq src, dst — move 64 bits from a GPR to an XMM register.
    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
1360 #endif
1361
    // movsd src, dst — scalar double register-to-register move.
    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // movsd src, offset(base) — store a scalar double to memory.
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    // movsd offset(base), dst — load a scalar double from memory.
    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }
1379
1380 #if !CPU(X86_64)
    // 32-bit x86 only: load a scalar double from an absolute address.
    void movsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }
1386 #endif
1387
    // mulsd src, dst — scalar double multiply.
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // mulsd offset(base), dst — scalar double multiply from memory.
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // pextrw $whichWord, src, dst — extract the selected 16-bit word of src
    // into dst (word index is an 8-bit immediate).
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    // subsd src, dst — scalar double subtract.
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // subsd offset(base), dst — scalar double subtract from memory.
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // ucomisd src, dst — unordered double compare, setting EFLAGS.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // ucomisd offset(base), dst — unordered double compare against memory.
    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // divsd src, dst — scalar double divide.
    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // divsd offset(base), dst — scalar double divide by a memory operand.
    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // xorpd src, dst — packed-double bitwise XOR (commonly used to zero or
    // flip the sign of an XMM register).
    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    // sqrtsd src, dst — scalar double square root.
    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1454
1455 // Misc instructions:
1456
    // int3 — software breakpoint.
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    // ret — near return.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    // Plant the branch-not-taken hint prefix ahead of the next conditional
    // branch emitted.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
1471
1472 // Assembler admin methods:
1473
    // Number of bytes of code emitted so far.
    size_t codeSize() const
    {
        return m_formatter.codeSize();
    }

    // Label marking the current end of the emitted code.
    AssemblerLabel label()
    {
        return m_formatter.label();
    }

    // Pad with one-byte HLT instructions until the buffer offset is a
    // multiple of 'alignment', then return a label at the aligned position.
    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
1491
1492 // Linking & patching:
1493 //
1494 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1495 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1496 // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
1498 // pool the 'repatch' and 'relink' methods should be used.
1499
    // Link a jump within the assembler's own buffer: patch the rel32 ending
    // at 'from' to branch to 'to'. The placeholder must still be zero
    // (i.e. the jump must not have been linked already).
    void linkJump(AssemblerLabel from, AssemblerLabel to)
    {
        ASSERT(from.isSet());
        ASSERT(to.isSet());

        char* code = reinterpret_cast<char*>(m_formatter.data());
        ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
        setRel32(code + from.m_offset, code + to.m_offset);
    }

    // Link a jump in a copied-out code buffer 'code' to an absolute target.
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Link a call in a copied-out code buffer 'code' to an absolute target.
    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    // Patch the pointer-sized immediate ending at 'where' within 'code'.
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        ASSERT(where.isSet());

        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }
1530
    // Retarget an already-linked jump whose rel32 ends at 'from'.
    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Retarget an already-linked call whose rel32 ends at 'from'.
    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }

    // Overwrite the 32-bit immediate ending at 'where'.
    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }

    // Overwrite the pointer-sized immediate ending at 'where'.
    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }
1550
    // A call's label marks the end of its immediate, which is the return
    // address — so the label offset is the return offset.
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }

    // Translate a label into an absolute address within copied-out code.
    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    // Byte distance from label a to label b (negative if b precedes a).
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }
1567
    // Copy the emitted code into executable memory from 'allocator';
    // returns the address of the copy.
    void* executableCopy(ExecutablePool* allocator)
    {
        return m_formatter.executableCopy(allocator);
    }

    // Discard all code emitted after 'rewindTo'.
    void rewindToLabel(AssemblerLabel rewindTo) { m_formatter.rewindToLabel(rewindTo); }

#ifndef NDEBUG
    // Current buffer offset, for debugging only.
    unsigned debugOffset() { return m_formatter.debugOffset(); }
#endif
1578
1579 private:
1580
    // Store 'value' into the pointer-sized slot immediately preceding 'where'
    // (labels address the byte just past the field they describe).
    static void setPointer(void* where, void* value)
    {
        reinterpret_cast<void**>(where)[-1] = value;
    }

    // Store 'value' into the 4 bytes immediately preceding 'where'.
    static void setInt32(void* where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }

    // Patch the rel32 ending at 'from' to reach 'to'. x86 relative branches
    // are computed from the end of the instruction, i.e. from 'from' itself;
    // the displacement must fit in 32 bits.
    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(from, offset);
    }
1598
1599 class X86InstructionFormatter {
1600
1601 static const int maxInstructionSize = 16;
1602
1603 public:
1604
1605 // Legacy prefix bytes:
1606 //
        // These are emitted prior to the instruction.
1608
        // Plant a single legacy prefix byte (e.g. operand-size 0x66, F2/F3).
        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }
1613
1614 // Word-sized operands / no operand instruction formatters.
1615 //
1616 // In addition to the opcode, the following operand permutations are supported:
1617 // * None - instruction takes no operands.
1618 // * One register - the low three bits of the RegisterID are added into the opcode.
1619 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1620 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
1621 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1622 //
1623 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1624 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1625 //
1626 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
1627
        // Opcode only — no operands.
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        // Register-in-opcode form: low three bits of 'reg' are added to the
        // opcode byte (e.g. push/pop/mov-immediate).
        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // Register-register ModRM form ('reg' may also be a group opcode).
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Memory operand: base register plus displacement.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but the displacement is always encoded in 32 bits.
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // Memory operand with SIB: base + index*(1<<scale) + offset.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
1672
1673 #if !CPU(X86_64)
        // 32-bit x86 only: absolute-address memory operand (no REX machinery).
        void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
1680 #endif
1681
        // Two-byte (0x0F-escaped) opcode — no operands.
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        // Two-byte opcode, register-register ModRM form.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Two-byte opcode, memory operand: base register plus displacement.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // Two-byte opcode, SIB memory operand: base + index*(1<<scale) + offset.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
1715
1716 #if !CPU(X86_64)
        // 32-bit x86 only: two-byte opcode with an absolute-address operand.
        void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
1724 #endif
1725
1726 #if CPU(X86_64)
1727 // Quad-word-sized operands:
1728 //
        // Used to format 64-bit operations, planting a REX.w prefix.
1730 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1731 // the normal (non-'64'-postfixed) formatters should be used.
1732
        // 64-bit (REX.W) opcode — no operands.
        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        // 64-bit register-in-opcode form.
        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // 64-bit register-register ModRM form.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // 64-bit memory operand: base register plus displacement.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but the displacement is always encoded in 32 bits.
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // 64-bit SIB memory operand: base + index*(1<<scale) + offset.
        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        // 64-bit two-byte opcode, register-register ModRM form.
        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
1787 #endif
1788
1789 // Byte-operands:
1790 //
1791 // These methods format byte operations. Byte operations differ from the normal
1792 // formatters in the circumstances under which they will decide to emit REX prefixes.
1793 // These should be used where any register operand signifies a byte register.
1794 //
        // The distinction is due to the handling of register numbers in the range 4..7 on
1796 // x86-64. These register numbers may either represent the second byte of the first
1797 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1798 //
1799 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1800 // be accessed where a REX prefix is present), these are likely best treated as
1801 // deprecated. In order to ensure the correct registers spl..dil are selected a
1802 // REX prefix will be emitted for any byte register operand in the range 4..15.
1803 //
        // These formatters may be used in instructions where operand sizes are mixed, in
        // which case an unnecessary REX will be emitted, for example:
1806 // movzbl %al, %edi
1807 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1808 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1809 // be silently ignored by the processor.
1810 //
1811 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1812 // is provided to check byte register operands.
1813
        // One-byte opcode with a byte-register ModRM operand; a REX is planted
        // whenever rm is 4..15 so spl..r15b are selected rather than ah..bh.
        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
1821
twoByteOp8(TwoByteOpcodeID opcode,RegisterID reg,RegisterID rm)1822 void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
1823 {
1824 m_buffer.ensureSpace(maxInstructionSize);
1825 emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
1826 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1827 m_buffer.putByteUnchecked(opcode);
1828 registerModRM(reg, rm);
1829 }
1830
        // Two-byte opcode with a group opcode in the reg field and a
        // byte-register rm operand; REX planted for rm in 4..15.
        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
1839
1840 // Immediates:
1841 //
        // An immediate should be appended where appropriate after an op has been emitted.
1843 // The writes are unchecked since the opcode formatters above will have ensured space.
1844
        // Append an 8-bit immediate (space already reserved by the op emitter).
        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        // Append a 16-bit immediate.
        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        // Append a 32-bit immediate.
        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        // Append a 64-bit immediate.
        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        // Append a zero 32-bit placeholder for a rel32 and return a label at
        // the end of the immediate, for later patching via setRel32.
        AssemblerLabel immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return label();
        }
1870
1871 // Administrative methods:
1872
        // Thin forwards to the underlying AssemblerBuffer.
        size_t codeSize() const { return m_buffer.codeSize(); }
        AssemblerLabel label() const { return m_buffer.label(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }

        // Copy the buffer's contents into executable memory from 'allocator'.
        void* executableCopy(ExecutablePool* allocator)
        {
            return m_buffer.executableCopy(allocator);
        }
1882
        // Discard all bytes emitted after 'rewindTo'.
        void rewindToLabel(AssemblerLabel rewindTo) { m_buffer.rewindToLabel(rewindTo); }

#ifndef NDEBUG
        // Current buffer offset, for debugging only.
        unsigned debugOffset() { return m_buffer.debugOffset(); }
#endif
1888
1889 private:
1890
1891 // Internals; ModRm and REX formatters.
1892
1893 static const RegisterID noBase = X86Registers::ebp;
1894 static const RegisterID hasSib = X86Registers::esp;
1895 static const RegisterID noIndex = X86Registers::esp;
1896 #if CPU(X86_64)
1897 static const RegisterID noBase2 = X86Registers::r13;
1898 static const RegisterID hasSib2 = X86Registers::r12;
1899
        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86Registers::r8);
        }

        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86Registers::esp);
        }

        // Format a REX prefix byte: 0100WRXB, where R/X/B carry the high bit
        // of the ModRM reg field, SIB index, and ModRM rm/base respectively.
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
        }

        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition) emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86; these no-op stand-ins let shared
        // emitter code call the REX helpers unconditionally.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif
1943
        // Values placed into the top two ('mod') bits of a ModRM byte by
        // putModRm (mode << 6); see putModRm/putModRmSib below.
        enum ModRmMode {
            ModRmMemoryNoDisp,    // memory operand, no displacement
            ModRmMemoryDisp8,     // memory operand + 8-bit displacement
            ModRmMemoryDisp32,    // memory operand + 32-bit displacement
            ModRmRegister,        // direct register operand
        };
1950
putModRm(ModRmMode mode,int reg,RegisterID rm)1951 void putModRm(ModRmMode mode, int reg, RegisterID rm)
1952 {
1953 m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
1954 }
1955
putModRmSib(ModRmMode mode,int reg,RegisterID base,RegisterID index,int scale)1956 void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
1957 {
1958 ASSERT(mode != ModRmRegister);
1959
1960 putModRm(mode, reg, hasSib);
1961 m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
1962 }
1963
        // Emit a ModRM byte for a direct register operand.
        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }
1968
        // Emit the ModRM byte (and SIB byte where the base register demands
        // one) plus displacement for a [base + offset] memory operand, using
        // the shortest displacement encoding (none / 8-bit / 32-bit) that can
        // represent |offset|.
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
                // A base of ebp (or r13) cannot use the no-displacement form
                // (that encoding means something else - see the noBase
                // comment above), so those bases always get an explicit
                // displacement even when the offset is zero.
#if CPU(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }
2002
        // As memoryModRM(reg, base, offset), but always emits the full 32-bit
        // displacement form regardless of the offset's magnitude.
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }
2018
        // Emit ModRM + SIB + displacement for a [base + index*2^scale + offset]
        // memory operand, choosing the shortest displacement encoding. An index
        // is always present here (|index| must not be noIndex), so a SIB byte
        // is always emitted.
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

            // ebp/r13 can't take the no-displacement form (see noBase above).
#if CPU(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }
2037
#if !CPU(X86_64)
        // Emit a ModRM byte plus a 32-bit absolute address operand. 32-bit
        // x86 only: the pointer is truncated into an int32_t displacement.
        void memoryModRM(int reg, const void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif
2046
        AssemblerBuffer m_buffer; // Backing storage for the emitted machine code.
2048 } m_formatter;
2049 };
2050
2051 } // namespace JSC
2052
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
2054
2055 #endif // X86Assembler_h
2056