1 /*
2 * i386 translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "tcg/tcg-op-gvec.h"
26 #include "exec/translator.h"
27 #include "fpu/softfloat.h"
28
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "helper-tcg.h"
32
33 #include "exec/log.h"
34
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef HELPER_H
38
39 /* Fixes for Windows namespace pollution. */
40 #undef IN
41 #undef OUT
42
43 #define PREFIX_REPZ 0x01
44 #define PREFIX_REPNZ 0x02
45 #define PREFIX_LOCK 0x04
46 #define PREFIX_DATA 0x08
47 #define PREFIX_ADR 0x10
48 #define PREFIX_VEX 0x20
49 #define PREFIX_REX 0x40
50
51 #ifdef TARGET_X86_64
52 # define ctztl ctz64
53 # define clztl clz64
54 #else
55 # define ctztl ctz32
56 # define clztl clz32
57 #endif
58
59 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
60 #define CASE_MODRM_MEM_OP(OP) \
61 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
62 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
63 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
64
65 #define CASE_MODRM_OP(OP) \
66 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
67 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
68 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
69 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
70
71 //#define MACRO_TEST 1
72
/* global register indexes, mapped onto CPUX86State fields at translator init */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;  /* condition-code inputs */
static TCGv cpu_eip;                              /* program counter (EIP/RIP) */
static TCGv_i32 cpu_cc_op;                        /* in-memory copy of DisasContext.cc_op */
static TCGv cpu_regs[CPU_NB_REGS];                /* general-purpose registers */
static TCGv cpu_seg_base[6];                      /* segment base addresses */
static TCGv_i64 cpu_bndl[4];                      /* BND0-3 lower bounds */
static TCGv_i64 cpu_bndu[4];                      /* BND0-3 upper bounds */
81
/* Per-translation-block decoder/translator state. */
typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;  /* pc at the last EIP sync; -1 after a dynamic
                              jump (see gen_op_jmp_v) */

    MemOp aflag;           /* address size of the current insn */
    MemOp dflag;           /* operand size of the current insn */

    int8_t override;       /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;        /* PREFIX_* bits seen on the current insn */

    bool has_modrm;        /* modrm byte below is valid */
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement. */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;  /* REX/VEX register-number extension bits; encoding is
                       decoder-defined — see REX_R()/REX_X()/REX_B() users */
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty; /* cc_op is newer than the cpu_cc_op global */

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;  /* minuend snapshot for CC_OP_SUB* (see USES_CC_SRCT) */
    TCGv A0;       /* effective-address temp */
    TCGv T0;       /* operand temps */
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    /* NOTE(review): presumably the siglongjmp target used to abort
       translation of the current insn — confirm against jmpbuf users. */
    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;
146
147 /*
148 * Point EIP to next instruction before ending translation.
149 * For instructions that can change hflags.
150 */
151 #define DISAS_EOB_NEXT DISAS_TARGET_0
152
153 /*
154 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
155 * already set. For instructions that activate interrupt shadow.
156 */
157 #define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_1
158
159 /*
160 * Return to the main loop; EIP might have already been updated
161 * but even in that case do not use lookup_and_goto_ptr().
162 */
163 #define DISAS_EOB_ONLY DISAS_TARGET_2
164
165 /*
166 * EIP has already been updated. For jumps that wish to use
167 * lookup_and_goto_ptr()
168 */
169 #define DISAS_JUMP DISAS_TARGET_3
170
171 /*
172 * EIP has already been updated. Use updated value of
173 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
174 */
175 #define DISAS_EOB_RECHECK_TF DISAS_TARGET_4
176
177 /* The environment in which user-only runs is constrained. */
178 #ifdef CONFIG_USER_ONLY
179 #define PE(S) true
180 #define CPL(S) 3
181 #define IOPL(S) 0
182 #define SVME(S) false
183 #define GUEST(S) false
184 #else
185 #define PE(S) (((S)->flags & HF_PE_MASK) != 0)
186 #define CPL(S) ((S)->cpl)
187 #define IOPL(S) ((S)->iopl)
188 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
189 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
190 #endif
191 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
192 #define VM86(S) false
193 #define CODE32(S) true
194 #define SS32(S) true
195 #define ADDSEG(S) false
196 #else
197 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
198 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
199 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
200 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
201 #endif
202 #if !defined(TARGET_X86_64)
203 #define CODE64(S) false
204 #elif defined(CONFIG_USER_ONLY)
205 #define CODE64(S) true
206 #else
207 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
208 #endif
209 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
210 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
211 #else
212 #define LMA(S) false
213 #endif
214
215 #ifdef TARGET_X86_64
216 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
217 #define REX_W(S) ((S)->vex_w)
218 #define REX_R(S) ((S)->rex_r + 0)
219 #define REX_X(S) ((S)->rex_x + 0)
220 #define REX_B(S) ((S)->rex_b + 0)
221 #else
222 #define REX_PREFIX(S) false
223 #define REX_W(S) false
224 #define REX_R(S) 0
225 #define REX_X(S) 0
226 #define REX_B(S) 0
227 #endif
228
/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

/*
 * Each stub's signature mirrors the generated gen_helper_* prototype of
 * the corresponding sysemu helper; qemu_build_not_reached() guarantees
 * the call is compiled out (any reachable use is a build error).
 */
#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
#endif
257
258 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
259 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
260 static void gen_exception_gpf(DisasContext *s);
261
/* i386 shift ops — values match the /reg field of the shift opcode group */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};
273
/* Jcc condition kinds; the opcode's low bit (handled separately) inverts them */
enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};
284
/* Bit flags naming which CC globals a CC_OP reads (see cc_op_live[]) */
enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};
291
/* Bit set if the global variable is live after setting CC_OP to X.
   Used by set_cc_op_1() to discard dead values and by gen_mov_eflags()
   to avoid reading globals the current CC_OP does not define. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    /* SUB additionally keeps the minuend in cc_srcT */
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_DST,
};
313
/*
 * Switch the tracked CC operation to OP, discarding any CC globals that
 * the new op no longer reads.  DIRTY records whether cpu_cc_op (the
 * in-memory copy) must still be spilled by gen_update_cc_op().
 */
static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    /* Leaving DYNAMIC: the stored cpu_cc_op value is now stale. */
    if (dirty && s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_discard_i32(cpu_cc_op);
    }
    s->cc_op_dirty = dirty;
    s->cc_op = op;
}
343
/*
 * Set the current CC operation.  DYNAMIC exists only inside the
 * translator and is never written back; every other value is spilled
 * lazily by gen_update_cc_op().
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    bool needs_spill = (op != CC_OP_DYNAMIC);

    set_cc_op_1(s, op, needs_spill);
}
352
/*
 * Record that the CC state already matches OP without marking
 * cpu_cc_op as needing a spill (the stored value is still valid).
 */
static void assume_cc_op(DisasContext *s, CCOp op)
{
    set_cc_op_1(s, op, /*dirty=*/false);
}
357
/* Spill the tracked CC op into the cpu_cc_op global if it is stale. */
static void gen_update_cc_op(DisasContext *s)
{
    if (!s->cc_op_dirty) {
        return;
    }
    tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
    s->cc_op_dirty = false;
}
365
366 #ifdef TARGET_X86_64
367
368 #define NB_OP_SIZES 4
369
370 #else /* !TARGET_X86_64 */
371
372 #define NB_OP_SIZES 3
373
374 #endif /* !TARGET_X86_64 */
375
376 #if HOST_BIG_ENDIAN
377 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
378 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
379 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
380 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
381 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
382 #else
383 #define REG_B_OFFSET 0
384 #define REG_H_OFFSET 1
385 #define REG_W_OFFSET 0
386 #define REG_L_OFFSET 0
387 #define REG_LH_OFFSET 4
388 #endif
389
390 /* In instruction encodings for byte register accesses the
391 * register number usually indicates "low 8 bits of register N";
392 * however there are some special cases where N 4..7 indicates
393 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
394 * true for this special case, false otherwise.
395 */
byte_reg_is_xH(DisasContext * s,int reg)396 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
397 {
398 /* Any time the REX prefix is present, byte registers are uniform */
399 if (reg < 4 || REX_PREFIX(s)) {
400 return false;
401 }
402 return true;
403 }
404
405 /* Select the size of a push/pop operation. */
mo_pushpop(DisasContext * s,MemOp ot)406 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
407 {
408 if (CODE64(s)) {
409 return ot == MO_16 ? MO_16 : MO_64;
410 } else {
411 return ot;
412 }
413 }
414
415 /* Select the size of the stack pointer. */
mo_stacksize(DisasContext * s)416 static inline MemOp mo_stacksize(DisasContext *s)
417 {
418 return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
419 }
420
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            /* AH/CH/DH/BH: merge into bits 15..8 of register REG - 4. */
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
462
/* Store T0 into the OT-sized register REG (discarding the merged value). */
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}
467
468 static inline
gen_op_mov_v_reg(DisasContext * s,MemOp ot,TCGv t0,int reg)469 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
470 {
471 if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
472 tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
473 } else {
474 tcg_gen_mov_tl(t0, cpu_regs[reg]);
475 }
476 }
477
/* Add VAL to the address temp A0, wrapping at 32 bits outside long mode. */
static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (CODE64(s)) {
        return;
    }
    tcg_gen_ext32u_tl(s->A0, s->A0);
}
485
/* Jump to a dynamic target: write DEST to EIP and mark pc_save invalid,
   since the translator can no longer describe EIP relative to s->pc. */
static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}
491
/* Add the immediate VAL to register REG at size SIZE (via tmp0, so the
   register write honors the partial-register merge rules). */
static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
498
/* Add the TCG value VAL to register REG at size SIZE. */
static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
504
/* Little-endian guest load of size IDX from address A0 into T0. */
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}
509
/* Little-endian guest store of size IDX of T0 to address A0. */
static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}
514
/* Sync EIP to the address of the *next* instruction (s->pc).
   With CF_PCREL the update must be relative to the last synced value. */
static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        /* Outside 64-bit mode EIP is a 32-bit CS-relative offset. */
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}
527
/* Sync EIP to the address of the *current* instruction (s->base.pc_next). */
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        /* Outside 64-bit mode EIP is a 32-bit CS-relative offset. */
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}
540
/* Length in bytes of the instruction currently being translated. */
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}
545
/* Same as cur_insn_len(), as an i32 constant for helper calls. */
static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}
550
/* Return the EIP of the next instruction as an i32 value. */
static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* Derive the value from the (already relative) cpu_eip. */
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}
574
/* Return the EIP of the next instruction as a target-long value. */
static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* cpu_eip already tracks the relative value; just add the delta. */
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}
588
/* Return the EIP of the current instruction as a target-long value. */
static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}
602
/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override. */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        /* 64-bit addressing ignores segmentation unless overridden. */
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            /* 32-bit address in long mode: zero-extend before adding base. */
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            /* Legacy mode: linear addresses wrap at 32 bits. */
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}
658
/* Compute SEG:A0 into s->A0, using the insn's address size. */
static void gen_lea_v_seg(DisasContext *s, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
}
664
/* A0 = DS:ESI (source of string ops; honors segment override). */
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
}
669
/* A0 = ES:EDI (destination of string ops; never overridable). */
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
}
674
gen_compute_Dshift(DisasContext * s,MemOp ot)675 static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
676 {
677 TCGv dshift = tcg_temp_new();
678 tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
679 tcg_gen_shli_tl(dshift, dshift, ot);
680 return dshift;
681 };
682
/* Extend SRC from SIZE to target-long width (signed if SIGN) into DST.
   If DST is NULL a new temp is allocated; for MO_TL the extension is a
   no-op and SRC itself is returned.  Returns the TCGv holding the result. */
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}
694
/* Sign-extend REG in place from size OT to target-long width. */
static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
699
/* Branch to LABEL1 if COND holds for (ECX compared against 0), with ECX
   truncated to the insn's address size (CX/ECX/RCX). */
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}
706
/* Branch to LABEL1 if the address-sized ECX is zero. */
static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}
711
/* Branch to LABEL1 if the address-sized ECX is non-zero. */
static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}
716
/* Emit the port-input helper of width OT; the result lands in DST. */
static void gen_helper_in_func(MemOp ot, TCGv dst, TCGv_i32 port)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(dst, tcg_env, port);
        break;
    case MO_16:
        gen_helper_inw(dst, tcg_env, port);
        break;
    case MO_32:
        gen_helper_inl(dst, tcg_env, port);
        break;
    default:
        /* MO_64 I/O does not exist on x86. */
        g_assert_not_reached();
    }
}
733
/* Emit the port-output helper of width OT, writing VAL to PORT. */
static void gen_helper_out_func(MemOp ot, TCGv_i32 val, TCGv_i32 port)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, val, port);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, val, port);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, val, port);
        break;
    default:
        /* MO_64 I/O does not exist on x86. */
        g_assert_not_reached();
    }
}
750
751 /*
752 * Validate that access to [port, port + 1<<ot) is allowed.
753 * Raise #GP, or VMM exit if not.
754 */
gen_check_io(DisasContext * s,MemOp ot,TCGv_i32 port,uint32_t svm_flags)755 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
756 uint32_t svm_flags)
757 {
758 #ifdef CONFIG_USER_ONLY
759 /*
760 * We do not implement the ioperm(2) syscall, so the TSS check
761 * will always fail.
762 */
763 gen_exception_gpf(s);
764 return false;
765 #else
766 if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
767 gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
768 }
769 if (GUEST(s)) {
770 gen_update_cc_op(s);
771 gen_update_eip_cur(s);
772 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
773 svm_flags |= SVM_IOIO_REP_MASK;
774 }
775 svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
776 gen_helper_svm_check_io(tcg_env, port,
777 tcg_constant_i32(svm_flags),
778 cur_insn_len_i32(s));
779 }
780 return true;
781 #endif
782 }
783
/* One MOVS iteration: copy OT bytes from DS:ESI to ES:EDI, then step
   both index registers by the direction-flag increment. */
static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
797
/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    /* Fast path: flags already materialized in cc_src. */
    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    /* Fast path: CLR means exactly Z and P set, everything else clear. */
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    /* A known cc_op becomes a constant; DYNAMIC reads the global. */
    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}
841
/* compute all eflags to cc_src, and switch to the EFLAGS cc_op */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}
848
/* Recipe for evaluating a condition without materializing it yet. */
typedef struct CCPrepare {
    TCGCond cond;        /* comparison to apply */
    TCGv reg;            /* first operand */
    TCGv reg2;           /* second operand, valid iff use_reg2 */
    target_ulong imm;    /* immediate operand, used when !use_reg2 */
    bool use_reg2;
    bool no_setcond;     /* reg already holds the 0/1 (or 0/nonzero) result */
} CCPrepare;
857
gen_prepare_sign_nz(TCGv src,MemOp size)858 static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
859 {
860 if (size == MO_TL) {
861 return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
862 } else {
863 return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
864 .imm = 1ull << ((8 << size) - 1) };
865 }
866 }
867
/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* These ops always leave CF clear. */
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        /* INC/DEC preserve CF; it was saved in cc_src. */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        if (!reg) {
            reg = tcg_temp_new();
        }
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .no_setcond = true };
    }
}
938
/* compute eflags.P, trying to store it in reg if not NULL.
   There is no fast path: all flags are materialized first. */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}
946
/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        /* SF lives in the materialized flags word. */
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* These ops always leave SF clear. */
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        {
            /* Otherwise SF is the sign bit of cc_dst at the op's width. */
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            return gen_prepare_sign_nz(cpu_cc_dst, size);
        }
    }
}
970
/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        /* OF is kept separately in cc_src2 for ADOX/ADCOX. */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* These ops always leave OF clear. */
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        /* No cheap recipe: materialize all flags and mask OF. */
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}
990
/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_CLR:
        /* CLR always sets ZF. */
        return (CCPrepare) { .cond = TCG_COND_ALWAYS };
    default:
        {
            /* Otherwise ZF is "cc_dst is zero" at the op's width. */
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            if (size == MO_TL) {
                return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
            } else {
                return (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = cpu_cc_dst,
                                     .imm = (1ull << (8 << size)) - 1 };
            }
        }
    }
}
1018
/* return how to compute jump opcode 'b'.  'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond.
 * B encodes the x86 condition: bit 0 inverts, bits 1..3 are JCC_*. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            /* Unsigned <=: compare the original sub operands directly. */
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            /* Signed </<=: sign-extend operands and compare directly. */
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            /* L is SF != OF: align the O bit onto S and test it. */
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            /* LE is Z || (SF != OF): same trick, also testing Z. */
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    /* Odd condition codes are the negation of the even ones. */
    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
1111
/* Store condition 'b' (0/1) of the current flags state into 'reg'. */
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        /* cc.reg already holds 0/1; EQ means it must be inverted. */
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}
1131
/* Store the carry flag (0/1) into 'reg'; JCC_B tests exactly CF. */
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
1136
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   Does not end the translation block (no cc_op sync). */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1149
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    /* Sync cc_op before the branch so both successors see a clean state. */
    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1165
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.  The caller can jump to the returned label
   to stop the REP but, if the flags have changed, it has to call
   gen_update_cc_op before doing so.  Returns the "exit" label (l2);
   ECX != 0 falls through past l1 into the string operation. */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_update_cc_op(s);
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    /* ECX == 0 (or a jump to l2): continue at the next instruction. */
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}
1182
/* STOS: store T0 at ES:[EDI], then advance EDI by the direction step. */
static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}
1189
/* LODS: load from DS:[ESI] into EAX, then advance ESI. */
static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}
1197
/* SCAS: compare AL/AX/EAX/RAX (in T0) with ES:[EDI], set flags as for
   SUB, then advance EDI. */
static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    /* Record operands and result for lazy flag computation. */
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}
1209
/* CMPS: compare DS:[ESI] with ES:[EDI], set flags as for SUB, then
   advance both index registers by the direction step. */
static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    /* Record operands and result for lazy flag computation. */
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
1227
/* Emit the I/O breakpoint check helper after an IN/OUT/INS/OUTS when
   the CPU has an I/O breakpoint armed (HF_IOBPT). */
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
1241
/* INS: read from port DX into ES:[EDI], then advance EDI. */
static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    /* Port number is the low 16 bits of EDX. */
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
1256
/* OUTS: write DS:[ESI] to port DX, then advance ESI. */
static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    /* Port number is the low 16 bits of EDX. */
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
1269
/* Generate jumps to current or next instruction.
 * Wrap a string operation 'fn' in a REP loop: skip when ECX == 0,
 * execute once, decrement ECX, then jump back to the same insn (one
 * iteration per translation block). */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /* Re-execute the current (REP-prefixed) instruction. */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
1287
/* As gen_repz, but for REPZ/REPNZ CMPS/SCAS: additionally exit the
 * loop when ZF disagrees with the prefix. */
static void gen_repz_nz(DisasContext *s, MemOp ot,
                        void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;

    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /* Exit on ZF == nz (REPZ stops when ZF clear, REPNZ when ZF set). */
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /*
     * Only one iteration is done at a time, so the translation
     * block ends unconditionally after this instruction and there
     * is no control flow junction - no need to set CC_OP_DYNAMIC.
     */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
1308
gen_helper_fp_arith_ST0_FT0(int op)1309 static void gen_helper_fp_arith_ST0_FT0(int op)
1310 {
1311 switch (op) {
1312 case 0:
1313 gen_helper_fadd_ST0_FT0(tcg_env);
1314 break;
1315 case 1:
1316 gen_helper_fmul_ST0_FT0(tcg_env);
1317 break;
1318 case 2:
1319 gen_helper_fcom_ST0_FT0(tcg_env);
1320 break;
1321 case 3:
1322 gen_helper_fcom_ST0_FT0(tcg_env);
1323 break;
1324 case 4:
1325 gen_helper_fsub_ST0_FT0(tcg_env);
1326 break;
1327 case 5:
1328 gen_helper_fsubr_ST0_FT0(tcg_env);
1329 break;
1330 case 6:
1331 gen_helper_fdiv_ST0_FT0(tcg_env);
1332 break;
1333 case 7:
1334 gen_helper_fdivr_ST0_FT0(tcg_env);
1335 break;
1336 }
1337 }
1338
/* NOTE the exception in "r" op ordering: for the ST(i) destination
   forms, FSUB/FSUBR and FDIV/FDIVR use the *swapped* helpers (op 4
   calls fsubr, op 5 calls fsub, etc.).  Ops 2/3 (compares) have no
   ST(i) form and are deliberately absent. */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}
1364
/* Raise exception 'trapno' at the current instruction; flags and EIP
   are synced first so the exception sees a precise state. */
static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}
1372
/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode. */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}
1379
/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}
1385
1386 /* Check for cpl == 0; if not, raise #GP and return false. */
check_cpl0(DisasContext * s)1387 static bool check_cpl0(DisasContext *s)
1388 {
1389 if (CPL(s) == 0) {
1390 return true;
1391 }
1392 gen_exception_gpf(s);
1393 return false;
1394 }
1395
/* XXX: add faster immediate case.
 * Double-width shift (SHLD/SHRD): shift T0 by 'count', filling the
 * vacated bits from T1.  'count' is already masked to 0..mask. */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
                             bool is_right, TCGv count)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 defined then fall through into MO_32 case,
         * otherwise fall through default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            /* tmp0 holds the (count-1)-shifted value so the caller can
               derive the carry-out; T0 gets the full shift. */
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        /* Generic path: shift T0 one way and T1 the other, then OR. */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        /* When count == 0, the (mask+1 - count) shift above is invalid;
           force the T1 contribution to zero in that case. */
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }
}
1463
#define X86_MAX_INSN_LENGTH 15

/* Advance the decode pointer by 'num_bytes' and return the old value.
 * Aborts translation via siglongjmp when the fetch would cross a page
 * (value 2) or exceed the architectural 15-byte limit (value 1). */
static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            /* Touch the second page to trigger any pending #PF first. */
            (void)translator_ldub(env, &s->base,
                                  (s->pc - 1) & TARGET_PAGE_MASK);
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
1492
/* Fetch one byte of the instruction stream and advance the decoder. */
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}
1497
/* Fetch a 16-bit word of the instruction stream and advance the decoder. */
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}
1502
/* Fetch a 32-bit word of the instruction stream and advance the decoder. */
static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}
1507
#ifdef TARGET_X86_64
/* Fetch a 64-bit word of the instruction stream and advance the decoder. */
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
1514
/* Decompose an address.  */

typedef struct AddressParts {
    int def_seg;        /* default segment register (R_DS or R_SS) */
    int base;           /* base register, -1 none, -2 RIP-relative */
    int index;          /* index register, -1 none */
    int scale;          /* index scale as a shift count (0..3) */
    target_long disp;   /* sign-extended displacement */
} AddressParts;
1524
/* Decode the modrm/SIB/displacement bytes following 'modrm' into an
 * AddressParts without emitting any TCG code.  Consumes SIB and
 * displacement bytes from the instruction stream as needed. */
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            /* SIB byte follows the modrm byte. */
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1; /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                /* mod=0, base=5: disp32 only; in 64-bit mode without a
                   SIB byte this is RIP-relative addressing. */
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        /* EBP/ESP-based addressing defaults to the stack segment. */
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        /* 16-bit addressing: fixed base/index pairs selected by rm. */
        if (mod == 0) {
            if (rm == 6) {
                /* mod=0, rm=6: disp16 only, no base. */
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
1647
/* Compute the address, with a minimum number of TCG ops.  Returns the
 * TCGv holding the effective address: either a guest register directly
 * (when no arithmetic is needed) or s->A0. */
static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
{
    TCGv ea = NULL;

    /* VSIB addressing supplies the (vector) index separately. */
    if (a.index >= 0 && !is_vsib) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
            ea = s->A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
            ea = s->A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (!ea) {
        /* No base or index: absolute or RIP-relative displacement. */
        if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
            /* With cpu_eip ~= pc_save, the expression is pc-relative. */
            tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
        } else {
            tcg_gen_movi_tl(s->A0, a.disp);
        }
        ea = s->A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(s->A0, ea, a.disp);
        ea = s->A0;
    }

    return ea;
}
1682
/* Decode the modrm addressing bytes and leave the segment-adjusted
   effective address in s->A0. */
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);
    gen_lea_v_seg(s, ea, a.def_seg, s->override);
}
1689
/* Consume the modrm addressing bytes without generating any code
   (used for multi-byte NOPs and similar). */
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    (void)gen_lea_modrm_0(env, s, modrm);
}
1694
/* Used for BNDCL, BNDCU, BNDCN.  Compare the effective address of the
   memory operand against bound 'bndv' with 'cond' and raise #BR on
   failure via the helper. */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);

    tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
    if (!CODE64(s)) {
        /* Outside 64-bit mode addresses are 32 bits. */
        tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
    }
    tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
    gen_helper_bndck(tcg_env, s->tmp2_i32);
}
1710
1711 /* generate modrm load of memory or register. */
gen_ld_modrm(CPUX86State * env,DisasContext * s,int modrm,MemOp ot)1712 static void gen_ld_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
1713 {
1714 int mod, rm;
1715
1716 mod = (modrm >> 6) & 3;
1717 rm = (modrm & 7) | REX_B(s);
1718 if (mod == 3) {
1719 gen_op_mov_v_reg(s, ot, s->T0, rm);
1720 } else {
1721 gen_lea_modrm(env, s, modrm);
1722 gen_op_ld_v(s, ot, s->T0, s->A0);
1723 }
1724 }
1725
1726 /* generate modrm store of memory or register. */
gen_st_modrm(CPUX86State * env,DisasContext * s,int modrm,MemOp ot)1727 static void gen_st_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
1728 {
1729 int mod, rm;
1730
1731 mod = (modrm >> 6) & 3;
1732 rm = (modrm & 7) | REX_B(s);
1733 if (mod == 3) {
1734 gen_op_mov_reg_v(s, ot, rm, s->T0);
1735 } else {
1736 gen_lea_modrm(env, s, modrm);
1737 gen_op_st_v(s, ot, s->T0, s->A0);
1738 }
1739 }
1740
insn_get_addr(CPUX86State * env,DisasContext * s,MemOp ot)1741 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1742 {
1743 target_ulong ret;
1744
1745 switch (ot) {
1746 case MO_8:
1747 ret = x86_ldub_code(env, s);
1748 break;
1749 case MO_16:
1750 ret = x86_lduw_code(env, s);
1751 break;
1752 case MO_32:
1753 ret = x86_ldl_code(env, s);
1754 break;
1755 #ifdef TARGET_X86_64
1756 case MO_64:
1757 ret = x86_ldq_code(env, s);
1758 break;
1759 #endif
1760 default:
1761 g_assert_not_reached();
1762 }
1763 return ret;
1764 }
1765
insn_get(CPUX86State * env,DisasContext * s,MemOp ot)1766 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1767 {
1768 uint32_t ret;
1769
1770 switch (ot) {
1771 case MO_8:
1772 ret = x86_ldub_code(env, s);
1773 break;
1774 case MO_16:
1775 ret = x86_lduw_code(env, s);
1776 break;
1777 case MO_32:
1778 #ifdef TARGET_X86_64
1779 case MO_64:
1780 #endif
1781 ret = x86_ldl_code(env, s);
1782 break;
1783 default:
1784 g_assert_not_reached();
1785 }
1786 return ret;
1787 }
1788
insn_get_signed(CPUX86State * env,DisasContext * s,MemOp ot)1789 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1790 {
1791 target_long ret;
1792
1793 switch (ot) {
1794 case MO_8:
1795 ret = (int8_t) x86_ldub_code(env, s);
1796 break;
1797 case MO_16:
1798 ret = (int16_t) x86_lduw_code(env, s);
1799 break;
1800 case MO_32:
1801 ret = (int32_t) x86_ldl_code(env, s);
1802 break;
1803 #ifdef TARGET_X86_64
1804 case MO_64:
1805 ret = x86_ldq_code(env, s);
1806 break;
1807 #endif
1808 default:
1809 g_assert_not_reached();
1810 }
1811 return ret;
1812 }
1813
/* Close out a conditional jump: the not-taken path continues at the
 * next instruction, the taken path jumps by 'diff'.  'not_taken' may
 * be NULL when the fall-through path needs no label. */
static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
                                        TCGLabel *not_taken, TCGLabel *taken)
{
    if (not_taken) {
        gen_set_label(not_taken);
    }
    gen_jmp_rel_csize(s, 0, 1);

    gen_set_label(taken);
    gen_jmp_rel(s, s->dflag, diff, 0);
}
1825
/* Jcc: branch by 'diff' bytes when condition 'b' holds. */
static void gen_jcc(DisasContext *s, int b, int diff)
{
    TCGLabel *l1 = gen_new_label();

    gen_jcc1(s, b, l1);
    gen_conditional_jump_labels(s, diff, NULL, l1);
}
1833
/* CMOVcc: dest = cond(b) ? src : dest. */
static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    /* movcond needs two registers; lift the immediate into a constant. */
    if (!cc.use_reg2) {
        cc.reg2 = tcg_constant_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
}
1844
/* Load a segment register in real/VM86 mode: store the 16-bit selector
   and set the segment base to selector << 4. */
static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
{
    TCGv selector = tcg_temp_new();
    tcg_gen_ext16u_tl(selector, seg);
    tcg_gen_st32_tl(selector, tcg_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
}
1853
/* move SRC to seg_reg and compute if the CPU state may change.  Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
{
    if (PE(s) && !VM86(s)) {
        /* Protected mode: full descriptor load via helper (may fault). */
        tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
        gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        } else if (CODE32(s) && seg_reg < R_FS) {
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
    } else {
        gen_op_movl_seg_real(s, seg_reg, src);
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        }
    }
}
1877
/* Far call: new CS in T1, new EIP in T0.  Protected mode goes through
   the full gate/descriptor helper; otherwise the real-mode helper. */
static void gen_far_call(DisasContext *s)
{
    TCGv_i32 new_cs = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(new_cs, s->T1);
    if (PE(s) && !VM86(s)) {
        gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
                                   tcg_constant_i32(s->dflag - 1),
                                   eip_next_tl(s));
    } else {
        TCGv_i32 new_eip = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(new_eip, s->T0);
        gen_helper_lcall_real(tcg_env, new_cs, new_eip,
                              tcg_constant_i32(s->dflag - 1),
                              eip_next_i32(s));
    }
    s->base.is_jmp = DISAS_JUMP;
}
1895
/* Far jump: new CS in T1, new EIP in T0. */
static void gen_far_jmp(DisasContext *s)
{
    if (PE(s) && !VM86(s)) {
        /* Protected mode: descriptor checks done by the helper. */
        TCGv_i32 new_cs = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(new_cs, s->T1);
        gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
                                  eip_next_tl(s));
    } else {
        gen_op_movl_seg_real(s, R_CS, s->T1);
        gen_op_jmp_v(s, s->T0);
    }
    s->base.is_jmp = DISAS_JUMP;
}
1909
/* Emit an SVM intercept check for 'type'.  When the vCPU is not a
   guest (the common case) no code is generated at all. */
static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
    if (unlikely(GUEST(s))) {
        gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
    }
}
1918
/* Adjust ESP by 'addend', honoring the current stack address size. */
static inline void gen_stack_update(DisasContext *s, int addend)
{
    gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
}
1923
/* dest = SS-based effective address of (src + offset), truncated to
   the stack address size. */
static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
{
    if (offset) {
        tcg_gen_addi_tl(dest, src, offset);
        src = dest;
    }
    gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
}
1932
/* Generate a push. It depends on ss32, addseg and dflag.  */
static void gen_push_v(DisasContext *s, TCGv val)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    TCGv new_esp = tcg_temp_new();

    tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);

    /* Now reduce the value to the address size and apply SS base.  */
    gen_lea_ss_ofs(s, s->A0, new_esp, 0);
    gen_op_st_v(s, d_ot, val, s->A0);
    /* Commit ESP only after the store, so a fault leaves ESP intact. */
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
}
1948
/* two step pop is necessary for precise exceptions: load the value
   into T0 here, and let the caller adjust ESP afterwards via
   gen_pop_update.  Returns the operand size used. */
static MemOp gen_pop_T0(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
    gen_op_ld_v(s, d_ot, s->T0, s->T0);

    return d_ot;
}
1959
/* Second half of a pop: bump ESP past the popped operand. */
static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
    gen_stack_update(s, 1 << ot);
}
1964
/* PUSHA: push all eight GPRs (EAX..EDI order, i.e. reg 7-i), then
   drop ESP by 8 slots in a single update. */
static void gen_pusha(DisasContext *s)
{
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
        gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
    }

    gen_stack_update(s, -8 * size);
}
1978
/* POPA: pop all eight GPRs (the stored ESP slot is skipped), then
   bump ESP by 8 slots in a single update. */
static void gen_popa(DisasContext *s)
{
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (7 - i == R_ESP) {
            continue;
        }
        gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
        gen_op_ld_v(s, d_ot, s->T0, s->A0);
        gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
    }

    gen_stack_update(s, 8 * size);
}
1997
/* ENTER: allocate a stack frame of 'esp_addend' bytes with 'level'
   nesting levels (level is taken modulo 32, per the ISA). */
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1.  */
    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
    gen_lea_ss_ofs(s, s->A0, s->T1, 0);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame.  */
        for (i = 1; i < level; ++i) {
            gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
            gen_op_ld_v(s, d_ot, s->tmp0, s->A0);

            gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
            gen_op_st_v(s, d_ot, s->tmp0, s->A0);
        }

        /* Push the current FrameTemp as the last level.  */
        gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
        gen_op_st_v(s, d_ot, s->T1, s->A0);
    }

    /* Copy the FrameTemp value to EBP.  */
    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1);

    /* Compute the final value of ESP.  */
    tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
2034
/* LEAVE: EBP = [EBP] (old frame pointer), ESP = old EBP + operand size. */
static void gen_leave(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);

    gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);

    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
2048
/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream.  Also dumps the raw
   bytes to the log when LOG_UNIMP is enabled. */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    if (qemu_loglevel_mask(LOG_UNIMP)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            target_ulong pc = s->base.pc_next, end = s->pc;

            fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
2070
/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, uint8_t intno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
                               cur_insn_len_i32(s));
    s->base.is_jmp = DISAS_NORETURN;
}
2081
/* Set 'mask' bits in env->hflags (and the cached copy in s->flags),
   skipping the store when they are already known to be set. */
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) != 0) {
        return;
    }
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUX86State, hflags));
    tcg_gen_ori_i32(tmp, tmp, mask);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUX86State, hflags));
    s->flags |= mask;
}
2092
/* Clear 'mask' bits in env->hflags (and the cached copy in s->flags),
   skipping the store when they are already known to be clear. */
static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        return;
    }
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUX86State, hflags));
    tcg_gen_andi_i32(tmp, tmp, ~mask);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUX86State, hflags));
    s->flags &= ~mask;
}
2103
/* OR 'mask' into env->eflags via a load/modify/store of the field. */
static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv flags = tcg_temp_new();

    tcg_gen_ld_tl(flags, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(flags, flags, mask);
    tcg_gen_st_tl(flags, tcg_env, offsetof(CPUX86State, eflags));
}
2112
/* Emit a read-modify-write that clears @mask in env->eflags. */
static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv flags = tcg_temp_new();

    tcg_gen_ld_tl(flags, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(flags, flags, ~mask);
    tcg_gen_st_tl(flags, tcg_env, offsetof(CPUX86State, eflags));
}
2121
2122 /* Clear BND registers during legacy branches. */
/* Clear BND registers during legacy branches. */
static void gen_bnd_jmp(DisasContext *s)
{
    /* A BND (F2) prefix keeps the bounds registers alive. */
    if (s->prefix & PREFIX_REPNZ) {
        return;
    }
    /* MPX must be enabled for the clear to be architecturally visible. */
    if (!(s->flags & HF_MPX_EN_MASK)) {
        return;
    }
    /* Skip the call when the BNDREGs are already known to be zero. */
    if (!(s->flags & HF_MPX_IU_MASK)) {
        return;
    }
    /* The helper itself re-checks BNDPRESERVE at runtime. */
    gen_helper_bnd_jmp(tcg_env);
}
2134
2135 /*
2136 * Generate an end of block, including common tasks such as generating
2137 * single step traps, resetting the RF flag, and handling the interrupt
2138 * shadow.
2139 */
static void
gen_eob(DisasContext *s, int mode)
{
    bool inhibit_reset;

    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it. */
    inhibit_reset = false;
    if (s->flags & HF_INHIBIT_IRQ_MASK) {
        /* Leaving the interrupt shadow set by the previous instruction. */
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
        inhibit_reset = true;
    } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
        /* This instruction (STI, MOV SS, ...) opens a new shadow. */
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    /* RF suppresses further debug traps for one instruction; clear it. */
    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (mode == DISAS_EOB_RECHECK_TF) {
        gen_helper_rechecking_single_step(tcg_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
        /* TF single-step trap; suppressed while entering the shadow. */
        gen_helper_single_step(tcg_env);
    } else if (mode == DISAS_JUMP &&
               /* give irqs a chance to happen */
               !inhibit_reset) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        /* Return to the main loop so pending work can be processed. */
        tcg_gen_exit_tb(NULL, 0);
    }

    s->base.is_jmp = DISAS_NORETURN;
}
2174
2175 /* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    /* Lazy cc_op state must already be flushed before a branch. */
    assert(!s->cc_op_dirty);

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                /* Data16 branch in code32: the mask below must be
                   applied at runtime, so a direct goto_tb is unsafe. */
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_eip &= mask;

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* PC-relative translation: adjust EIP by the relative delta. */
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    } else if (!CODE64(s)) {
        new_pc = (uint32_t)(new_eip + s->cs_base);
    }

    if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_eob(s, DISAS_JUMP); /* jump to another page */
        } else {
            gen_eob(s, DISAS_EOB_ONLY); /* exit to main loop */
        }
    }
}
2232
2233 /* Jump to eip+diff, truncating to the current code size. */
/* Jump to eip+diff, truncating to the current code size. */
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
{
    /* In CODE64 gen_jmp_rel ignores the OT argument, so either value works. */
    MemOp ot = CODE32(s) ? MO_32 : MO_16;

    gen_jmp_rel(s, ot, diff, tb_num);
}
2239
/* Load 64 bits from guest memory at A0 into the env field at @offset,
   clobbering the tmp1_i64 scratch register. */
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
}
2245
/* Store 64 bits from the env field at @offset to guest memory at A0,
   clobbering the tmp1_i64 scratch register. */
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
2251
/* Load an xmm-sized (128-bit) value from [A0] into env at @offset. */
static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
    TCGv_i128 val = tcg_temp_new_i128();
    MemOp mop = MO_128 | MO_LE;

    /* With AVX an aligned 16-byte access is single-copy atomic;
       otherwise only the two 8-byte halves are. */
    mop |= (s->cpuid_ext_features & CPUID_EXT_AVX
            ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    if (align) {
        mop |= MO_ALIGN_16;
    }

    tcg_gen_qemu_ld_i128(val, s->A0, s->mem_index, mop);
    tcg_gen_st_i128(val, tcg_env, offset);
}
2263
/* Store an xmm-sized (128-bit) value from env at @offset to [A0]. */
static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
    TCGv_i128 val = tcg_temp_new_i128();
    MemOp mop = MO_128 | MO_LE;

    /* With AVX an aligned 16-byte access is single-copy atomic;
       otherwise only the two 8-byte halves are. */
    mop |= (s->cpuid_ext_features & CPUID_EXT_AVX
            ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    if (align) {
        mop |= MO_ALIGN_16;
    }

    tcg_gen_ld_i128(val, tcg_env, offset);
    tcg_gen_qemu_st_i128(val, s->A0, s->mem_index, mop);
}
2275
/* Load a ymm-sized (256-bit) value from [A0] as two 128-bit halves. */
static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    TCGv_i128 lo = tcg_temp_new_i128();
    TCGv_i128 hi = tcg_temp_new_i128();

    /* Only the low half carries the 32-byte alignment check. */
    tcg_gen_qemu_ld_i128(lo, s->A0, s->mem_index,
                         align ? mop | MO_ALIGN_32 : mop);
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_qemu_ld_i128(hi, s->tmp0, s->mem_index, mop);

    tcg_gen_st_i128(lo, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_st_i128(hi, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
}
2290
/* Store a ymm-sized (256-bit) value to [A0] as two 128-bit halves. */
static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    TCGv_i128 val = tcg_temp_new_i128();

    /* Only the low half carries the 32-byte alignment check. */
    tcg_gen_ld_i128(val, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_qemu_st_i128(val, s->A0, s->mem_index,
                         align ? mop | MO_ALIGN_32 : mop);
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_ld_i128(val, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
    tcg_gen_qemu_st_i128(val, s->tmp0, s->mem_index, mop);
}
2303
/* Translate CMPXCHG8B: compare EDX:EAX with m64; on match store ECX:EBX,
   else load m64 into EDX:EAX.  ZF reflects the comparison result. */
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Set tmp0 to match the required value of Z. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
2356
2357 #ifdef TARGET_X86_64
/* Translate CMPXCHG16B (64-bit only): compare RDX:RAX with aligned m128;
   on match store RCX:RBX, else load m128 into RDX:RAX; ZF reflects it. */
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    /* MO_ALIGN enforces the architectural 16-byte alignment #GP check. */
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
2400 #endif
2401
/*
 * Decode and translate one x87 FPU instruction (opcode bytes 0xd8..0xdf).
 * @b is the primary opcode byte; the modrm byte is fetched here.
 * Returns true when the instruction was handled (including by raising an
 * exception), false to let the caller treat it as an unknown opcode.
 * On success, FIP (and FDP for memory operands) are updated unless the
 * instruction is a control op that must not touch them.
 */
static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
{
    CPUX86State *env = cpu_env(cpu);
    bool update_fip = true;
    int modrm, mod, rm, op;

    if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
        /* if CR0.EM or CR0.TS are set, generate an FPU exception */
        /* XXX: what to do if illegal op ? */
        gen_exception(s, EXCP07_PREX);
        return true;
    }
    modrm = x86_ldub_code(env, s);
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    /* op combines the low 3 opcode bits with modrm's reg field:
       a 6-bit index that identifies the x87 operation uniquely. */
    op = ((b & 7) << 3) | ((modrm >> 3) & 7);
    if (mod != 3) {
        /* memory op */
        AddressParts a = gen_lea_modrm_0(env, s, modrm);
        TCGv ea = gen_lea_modrm_1(s, a, false);
        TCGv last_addr = tcg_temp_new();
        bool update_fdp = true;

        /* Remember the unsegmented EA for the FDP update below. */
        tcg_gen_mov_tl(last_addr, ea);
        gen_lea_v_seg(s, ea, a.def_seg, s->override);

        switch (op) {
        case 0x00 ... 0x07: /* fxxxs */
        case 0x10 ... 0x17: /* fixxxl */
        case 0x20 ... 0x27: /* fxxxl */
        case 0x30 ... 0x37: /* fixxx */
            {
                int op1;
                op1 = op & 7;

                /* op >> 4 selects the memory operand type:
                   0 = float32, 1 = int32, 2 = float64, 3 = int16. */
                switch (op >> 4) {
                case 0:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
                    break;
                case 1:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                    break;
                case 2:
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
                    break;
                case 3:
                default:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LESW);
                    gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                    break;
                }

                gen_helper_fp_arith_ST0_FT0(op1);
                if (op1 == 3) {
                    /* fcomp needs pop */
                    gen_helper_fpop(tcg_env);
                }
            }
            break;
        case 0x08: /* flds */
        case 0x0a: /* fsts */
        case 0x0b: /* fstps */
        case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
        case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
        case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
            switch (op & 7) {
            case 0:
                /* Loads into ST0; operand type selected by op >> 4. */
                switch (op >> 4) {
                case 0:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
                    break;
                case 1:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                    break;
                case 2:
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
                    break;
                case 3:
                default:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LESW);
                    gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                    break;
                }
                break;
            case 1:
                /* fisttp variants (SSE3 truncating store-and-pop). */
                /* XXX: the corresponding CPUID bit must be tested ! */
                switch (op >> 4) {
                case 1:
                    gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    break;
                case 2:
                    gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    break;
                case 3:
                default:
                    gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    break;
                }
                gen_helper_fpop(tcg_env);
                break;
            default:
                /* Stores from ST0; (op & 7) == 3 is the popping form. */
                switch (op >> 4) {
                case 0:
                    gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    break;
                case 1:
                    gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    break;
                case 2:
                    gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    break;
                case 3:
                default:
                    gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    break;
                }
                if ((op & 7) == 3) {
                    gen_helper_fpop(tcg_env);
                }
                break;
            }
            break;
        case 0x0c: /* fldenv mem */
            gen_helper_fldenv(tcg_env, s->A0,
                              tcg_constant_i32(s->dflag - 1));
            update_fip = update_fdp = false;
            break;
        case 0x0d: /* fldcw mem */
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                s->mem_index, MO_LEUW);
            gen_helper_fldcw(tcg_env, s->tmp2_i32);
            update_fip = update_fdp = false;
            break;
        case 0x0e: /* fnstenv mem */
            gen_helper_fstenv(tcg_env, s->A0,
                              tcg_constant_i32(s->dflag - 1));
            update_fip = update_fdp = false;
            break;
        case 0x0f: /* fnstcw mem */
            gen_helper_fnstcw(s->tmp2_i32, tcg_env);
            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                s->mem_index, MO_LEUW);
            update_fip = update_fdp = false;
            break;
        case 0x1d: /* fldt mem */
            gen_helper_fldt_ST0(tcg_env, s->A0);
            break;
        case 0x1f: /* fstpt mem */
            gen_helper_fstt_ST0(tcg_env, s->A0);
            gen_helper_fpop(tcg_env);
            break;
        case 0x2c: /* frstor mem */
            gen_helper_frstor(tcg_env, s->A0,
                              tcg_constant_i32(s->dflag - 1));
            update_fip = update_fdp = false;
            break;
        case 0x2e: /* fnsave mem */
            gen_helper_fsave(tcg_env, s->A0,
                             tcg_constant_i32(s->dflag - 1));
            update_fip = update_fdp = false;
            break;
        case 0x2f: /* fnstsw mem */
            gen_helper_fnstsw(s->tmp2_i32, tcg_env);
            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                s->mem_index, MO_LEUW);
            update_fip = update_fdp = false;
            break;
        case 0x3c: /* fbld */
            gen_helper_fbld_ST0(tcg_env, s->A0);
            break;
        case 0x3e: /* fbstp */
            gen_helper_fbst_ST0(tcg_env, s->A0);
            gen_helper_fpop(tcg_env);
            break;
        case 0x3d: /* fildll */
            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                s->mem_index, MO_LEUQ);
            gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
            break;
        case 0x3f: /* fistpll */
            gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                s->mem_index, MO_LEUQ);
            gen_helper_fpop(tcg_env);
            break;
        default:
            return false;
        }

        if (update_fdp) {
            /* Record the last data pointer (segment selector + offset). */
            int last_seg = s->override >= 0 ? s->override : a.def_seg;

            tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                           offsetof(CPUX86State,
                                    segs[last_seg].selector));
            tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                             offsetof(CPUX86State, fpds));
            tcg_gen_st_tl(last_addr, tcg_env,
                          offsetof(CPUX86State, fpdp));
        }
    } else {
        /* register float ops */
        int opreg = rm;

        switch (op) {
        case 0x08: /* fld sti */
            gen_helper_fpush(tcg_env);
            gen_helper_fmov_ST0_STN(tcg_env,
                                    tcg_constant_i32((opreg + 1) & 7));
            break;
        case 0x09: /* fxchg sti */
        case 0x29: /* fxchg4 sti, undocumented op */
        case 0x39: /* fxchg7 sti, undocumented op */
            gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
            break;
        case 0x0a: /* grp d9/2 */
            switch (rm) {
            case 0: /* fnop */
                /*
                 * check exceptions (FreeBSD FPU probe)
                 * needs to be treated as I/O because of ferr_irq
                 */
                translator_io_start(&s->base);
                gen_helper_fwait(tcg_env);
                update_fip = false;
                break;
            default:
                return false;
            }
            break;
        case 0x0c: /* grp d9/4 */
            switch (rm) {
            case 0: /* fchs */
                gen_helper_fchs_ST0(tcg_env);
                break;
            case 1: /* fabs */
                gen_helper_fabs_ST0(tcg_env);
                break;
            case 4: /* ftst */
                gen_helper_fldz_FT0(tcg_env);
                gen_helper_fcom_ST0_FT0(tcg_env);
                break;
            case 5: /* fxam */
                gen_helper_fxam_ST0(tcg_env);
                break;
            default:
                return false;
            }
            break;
        case 0x0d: /* grp d9/5 */
            {
                /* Constant loads: fld1, fldl2t, fldl2e, fldpi,
                   fldlg2, fldln2, fldz. */
                switch (rm) {
                case 0:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fld1_ST0(tcg_env);
                    break;
                case 1:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldl2t_ST0(tcg_env);
                    break;
                case 2:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldl2e_ST0(tcg_env);
                    break;
                case 3:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldpi_ST0(tcg_env);
                    break;
                case 4:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldlg2_ST0(tcg_env);
                    break;
                case 5:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldln2_ST0(tcg_env);
                    break;
                case 6:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldz_ST0(tcg_env);
                    break;
                default:
                    return false;
                }
            }
            break;
        case 0x0e: /* grp d9/6 */
            switch (rm) {
            case 0: /* f2xm1 */
                gen_helper_f2xm1(tcg_env);
                break;
            case 1: /* fyl2x */
                gen_helper_fyl2x(tcg_env);
                break;
            case 2: /* fptan */
                gen_helper_fptan(tcg_env);
                break;
            case 3: /* fpatan */
                gen_helper_fpatan(tcg_env);
                break;
            case 4: /* fxtract */
                gen_helper_fxtract(tcg_env);
                break;
            case 5: /* fprem1 */
                gen_helper_fprem1(tcg_env);
                break;
            case 6: /* fdecstp */
                gen_helper_fdecstp(tcg_env);
                break;
            default:
            case 7: /* fincstp */
                gen_helper_fincstp(tcg_env);
                break;
            }
            break;
        case 0x0f: /* grp d9/7 */
            switch (rm) {
            case 0: /* fprem */
                gen_helper_fprem(tcg_env);
                break;
            case 1: /* fyl2xp1 */
                gen_helper_fyl2xp1(tcg_env);
                break;
            case 2: /* fsqrt */
                gen_helper_fsqrt(tcg_env);
                break;
            case 3: /* fsincos */
                gen_helper_fsincos(tcg_env);
                break;
            case 5: /* fscale */
                gen_helper_fscale(tcg_env);
                break;
            case 4: /* frndint */
                gen_helper_frndint(tcg_env);
                break;
            case 6: /* fsin */
                gen_helper_fsin(tcg_env);
                break;
            default:
            case 7: /* fcos */
                gen_helper_fcos(tcg_env);
                break;
            }
            break;
        case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
        case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
        case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
            {
                int op1;

                op1 = op & 7;
                if (op >= 0x20) {
                    gen_helper_fp_arith_STN_ST0(op1, opreg);
                    if (op >= 0x30) {
                        gen_helper_fpop(tcg_env);
                    }
                } else {
                    gen_helper_fmov_FT0_STN(tcg_env,
                                            tcg_constant_i32(opreg));
                    gen_helper_fp_arith_ST0_FT0(op1);
                }
            }
            break;
        case 0x02: /* fcom */
        case 0x22: /* fcom2, undocumented op */
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fcom_ST0_FT0(tcg_env);
            break;
        case 0x03: /* fcomp */
        case 0x23: /* fcomp3, undocumented op */
        case 0x32: /* fcomp5, undocumented op */
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fcom_ST0_FT0(tcg_env);
            gen_helper_fpop(tcg_env);
            break;
        case 0x15: /* da/5 */
            switch (rm) {
            case 1: /* fucompp */
                gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                gen_helper_fucom_ST0_FT0(tcg_env);
                gen_helper_fpop(tcg_env);
                gen_helper_fpop(tcg_env);
                break;
            default:
                return false;
            }
            break;
        case 0x1c:
            switch (rm) {
            case 0: /* feni (287 only, just do nop here) */
                break;
            case 1: /* fdisi (287 only, just do nop here) */
                break;
            case 2: /* fclex */
                gen_helper_fclex(tcg_env);
                update_fip = false;
                break;
            case 3: /* fninit */
                gen_helper_fninit(tcg_env);
                update_fip = false;
                break;
            case 4: /* fsetpm (287 only, just do nop here) */
                break;
            default:
                return false;
            }
            break;
        case 0x1d: /* fucomi */
            if (!(s->cpuid_features & CPUID_CMOV)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fucomi_ST0_FT0(tcg_env);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        case 0x1e: /* fcomi */
            if (!(s->cpuid_features & CPUID_CMOV)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fcomi_ST0_FT0(tcg_env);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        case 0x28: /* ffree sti */
            gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
            break;
        case 0x2a: /* fst sti */
            gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
            break;
        case 0x2b: /* fstp sti */
        case 0x0b: /* fstp1 sti, undocumented op */
        case 0x3a: /* fstp8 sti, undocumented op */
        case 0x3b: /* fstp9 sti, undocumented op */
            gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fpop(tcg_env);
            break;
        case 0x2c: /* fucom st(i) */
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fucom_ST0_FT0(tcg_env);
            break;
        case 0x2d: /* fucomp st(i) */
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fucom_ST0_FT0(tcg_env);
            gen_helper_fpop(tcg_env);
            break;
        case 0x33: /* de/3 */
            switch (rm) {
            case 1: /* fcompp */
                gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                gen_helper_fcom_ST0_FT0(tcg_env);
                gen_helper_fpop(tcg_env);
                gen_helper_fpop(tcg_env);
                break;
            default:
                return false;
            }
            break;
        case 0x38: /* ffreep sti, undocumented op */
            gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fpop(tcg_env);
            break;
        case 0x3c: /* df/4 */
            switch (rm) {
            case 0:
                /* fnstsw ax */
                gen_helper_fnstsw(s->tmp2_i32, tcg_env);
                tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                break;
            default:
                return false;
            }
            break;
        case 0x3d: /* fucomip */
            if (!(s->cpuid_features & CPUID_CMOV)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fucomi_ST0_FT0(tcg_env);
            gen_helper_fpop(tcg_env);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        case 0x3e: /* fcomip */
            if (!(s->cpuid_features & CPUID_CMOV)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fcomi_ST0_FT0(tcg_env);
            gen_helper_fpop(tcg_env);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        case 0x10 ... 0x13: /* fcmovxx */
        case 0x18 ... 0x1b:
            {
                int op1;
                TCGLabel *l1;
                /* Condition codes for FCMOVB/E/BE/U; the high opcode bit
                   (op bit 3) inverts the condition for FCMOVNB etc. */
                static const uint8_t fcmov_cc[8] = {
                    (JCC_B << 1),
                    (JCC_Z << 1),
                    (JCC_BE << 1),
                    (JCC_P << 1),
                };

                if (!(s->cpuid_features & CPUID_CMOV)) {
                    goto illegal_op;
                }
                op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                l1 = gen_new_label();
                gen_jcc1_noeob(s, op1, l1);
                gen_helper_fmov_ST0_STN(tcg_env,
                                        tcg_constant_i32(opreg));
                gen_set_label(l1);
            }
            break;
        default:
            return false;
        }
    }

    if (update_fip) {
        /* Record the last instruction pointer (CS selector + EIP). */
        tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                       offsetof(CPUX86State, segs[R_CS].selector));
        tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                         offsetof(CPUX86State, fpcs));
        tcg_gen_st_tl(eip_cur_tl(s),
                      tcg_env, offsetof(CPUX86State, fpip));
    }
    return true;

 illegal_op:
    gen_illegal_opcode(s);
    return true;
}
2965
disas_insn_old(DisasContext * s,CPUState * cpu,int b)2966 static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
2967 {
2968 CPUX86State *env = cpu_env(cpu);
2969 int prefixes = s->prefix;
2970 MemOp dflag = s->dflag;
2971 MemOp ot;
2972 int modrm, reg, rm, mod, op, val;
2973
2974 /* now check op code */
2975 switch (b) {
2976 case 0x1c7: /* cmpxchg8b */
2977 modrm = x86_ldub_code(env, s);
2978 mod = (modrm >> 6) & 3;
2979 switch ((modrm >> 3) & 7) {
2980 case 1: /* CMPXCHG8, CMPXCHG16 */
2981 if (mod == 3) {
2982 goto illegal_op;
2983 }
2984 #ifdef TARGET_X86_64
2985 if (dflag == MO_64) {
2986 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
2987 goto illegal_op;
2988 }
2989 gen_cmpxchg16b(s, env, modrm);
2990 break;
2991 }
2992 #endif
2993 if (!(s->cpuid_features & CPUID_CX8)) {
2994 goto illegal_op;
2995 }
2996 gen_cmpxchg8b(s, env, modrm);
2997 break;
2998
2999 case 7: /* RDSEED, RDPID with f3 prefix */
3000 if (mod != 3 ||
3001 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3002 goto illegal_op;
3003 }
3004 if (s->prefix & PREFIX_REPZ) {
3005 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
3006 goto illegal_op;
3007 }
3008 gen_helper_rdpid(s->T0, tcg_env);
3009 rm = (modrm & 7) | REX_B(s);
3010 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3011 break;
3012 } else {
3013 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3014 goto illegal_op;
3015 }
3016 goto do_rdrand;
3017 }
3018
3019 case 6: /* RDRAND */
3020 if (mod != 3 ||
3021 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3022 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3023 goto illegal_op;
3024 }
3025 do_rdrand:
3026 translator_io_start(&s->base);
3027 gen_helper_rdrand(s->T0, tcg_env);
3028 rm = (modrm & 7) | REX_B(s);
3029 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3030 assume_cc_op(s, CC_OP_EFLAGS);
3031 break;
3032
3033 default:
3034 goto illegal_op;
3035 }
3036 break;
3037
3038 /************************/
3039 /* bit operations */
3040 case 0x1ba: /* bt/bts/btr/btc Gv, im */
3041 ot = dflag;
3042 modrm = x86_ldub_code(env, s);
3043 op = (modrm >> 3) & 7;
3044 mod = (modrm >> 6) & 3;
3045 rm = (modrm & 7) | REX_B(s);
3046 if (mod != 3) {
3047 s->rip_offset = 1;
3048 gen_lea_modrm(env, s, modrm);
3049 if (!(s->prefix & PREFIX_LOCK)) {
3050 gen_op_ld_v(s, ot, s->T0, s->A0);
3051 }
3052 } else {
3053 gen_op_mov_v_reg(s, ot, s->T0, rm);
3054 }
3055 /* load shift */
3056 val = x86_ldub_code(env, s);
3057 tcg_gen_movi_tl(s->T1, val);
3058 if (op < 4)
3059 goto unknown_op;
3060 op -= 4;
3061 goto bt_op;
3062 case 0x1a3: /* bt Gv, Ev */
3063 op = 0;
3064 goto do_btx;
3065 case 0x1ab: /* bts */
3066 op = 1;
3067 goto do_btx;
3068 case 0x1b3: /* btr */
3069 op = 2;
3070 goto do_btx;
3071 case 0x1bb: /* btc */
3072 op = 3;
3073 do_btx:
3074 ot = dflag;
3075 modrm = x86_ldub_code(env, s);
3076 reg = ((modrm >> 3) & 7) | REX_R(s);
3077 mod = (modrm >> 6) & 3;
3078 rm = (modrm & 7) | REX_B(s);
3079 gen_op_mov_v_reg(s, MO_32, s->T1, reg);
3080 if (mod != 3) {
3081 AddressParts a = gen_lea_modrm_0(env, s, modrm);
3082 /* specific case: we need to add a displacement */
3083 gen_exts(ot, s->T1);
3084 tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
3085 tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
3086 tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
3087 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3088 if (!(s->prefix & PREFIX_LOCK)) {
3089 gen_op_ld_v(s, ot, s->T0, s->A0);
3090 }
3091 } else {
3092 gen_op_mov_v_reg(s, ot, s->T0, rm);
3093 }
3094 bt_op:
3095 tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
3096 tcg_gen_movi_tl(s->tmp0, 1);
3097 tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
3098 if (s->prefix & PREFIX_LOCK) {
3099 switch (op) {
3100 case 0: /* bt */
3101 /* Needs no atomic ops; we suppressed the normal
3102 memory load for LOCK above so do it now. */
3103 gen_op_ld_v(s, ot, s->T0, s->A0);
3104 break;
3105 case 1: /* bts */
3106 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
3107 s->mem_index, ot | MO_LE);
3108 break;
3109 case 2: /* btr */
3110 tcg_gen_not_tl(s->tmp0, s->tmp0);
3111 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
3112 s->mem_index, ot | MO_LE);
3113 break;
3114 default:
3115 case 3: /* btc */
3116 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
3117 s->mem_index, ot | MO_LE);
3118 break;
3119 }
3120 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3121 } else {
3122 tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3123 switch (op) {
3124 case 0: /* bt */
3125 /* Data already loaded; nothing to do. */
3126 break;
3127 case 1: /* bts */
3128 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
3129 break;
3130 case 2: /* btr */
3131 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
3132 break;
3133 default:
3134 case 3: /* btc */
3135 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
3136 break;
3137 }
3138 if (op != 0) {
3139 if (mod != 3) {
3140 gen_op_st_v(s, ot, s->T0, s->A0);
3141 } else {
3142 gen_op_mov_reg_v(s, ot, rm, s->T0);
3143 }
3144 }
3145 }
3146
3147 /* Delay all CC updates until after the store above. Note that
3148 C is the result of the test, Z is unchanged, and the others
3149 are all undefined. */
3150 switch (s->cc_op) {
3151 case CC_OP_MULB ... CC_OP_MULQ:
3152 case CC_OP_ADDB ... CC_OP_ADDQ:
3153 case CC_OP_ADCB ... CC_OP_ADCQ:
3154 case CC_OP_SUBB ... CC_OP_SUBQ:
3155 case CC_OP_SBBB ... CC_OP_SBBQ:
3156 case CC_OP_LOGICB ... CC_OP_LOGICQ:
3157 case CC_OP_INCB ... CC_OP_INCQ:
3158 case CC_OP_DECB ... CC_OP_DECQ:
3159 case CC_OP_SHLB ... CC_OP_SHLQ:
3160 case CC_OP_SARB ... CC_OP_SARQ:
3161 case CC_OP_BMILGB ... CC_OP_BMILGQ:
3162 case CC_OP_POPCNT:
3163 /* Z was going to be computed from the non-zero status of CC_DST.
3164 We can get that same Z value (and the new C value) by leaving
3165 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
3166 same width. */
3167 tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
3168 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
3169 break;
3170 default:
3171 /* Otherwise, generate EFLAGS and replace the C bit. */
3172 gen_compute_eflags(s);
3173 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
3174 ctz32(CC_C), 1);
3175 break;
3176 }
3177 break;
3178 case 0x100:
3179 modrm = x86_ldub_code(env, s);
3180 mod = (modrm >> 6) & 3;
3181 op = (modrm >> 3) & 7;
3182 switch(op) {
3183 case 0: /* sldt */
3184 if (!PE(s) || VM86(s))
3185 goto illegal_op;
3186 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3187 break;
3188 }
3189 gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
3190 tcg_gen_ld32u_tl(s->T0, tcg_env,
3191 offsetof(CPUX86State, ldt.selector));
3192 ot = mod == 3 ? dflag : MO_16;
3193 gen_st_modrm(env, s, modrm, ot);
3194 break;
3195 case 2: /* lldt */
3196 if (!PE(s) || VM86(s))
3197 goto illegal_op;
3198 if (check_cpl0(s)) {
3199 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
3200 gen_ld_modrm(env, s, modrm, MO_16);
3201 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3202 gen_helper_lldt(tcg_env, s->tmp2_i32);
3203 }
3204 break;
3205 case 1: /* str */
3206 if (!PE(s) || VM86(s))
3207 goto illegal_op;
3208 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3209 break;
3210 }
3211 gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
3212 tcg_gen_ld32u_tl(s->T0, tcg_env,
3213 offsetof(CPUX86State, tr.selector));
3214 ot = mod == 3 ? dflag : MO_16;
3215 gen_st_modrm(env, s, modrm, ot);
3216 break;
3217 case 3: /* ltr */
3218 if (!PE(s) || VM86(s))
3219 goto illegal_op;
3220 if (check_cpl0(s)) {
3221 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3222 gen_ld_modrm(env, s, modrm, MO_16);
3223 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3224 gen_helper_ltr(tcg_env, s->tmp2_i32);
3225 }
3226 break;
3227 case 4: /* verr */
3228 case 5: /* verw */
3229 if (!PE(s) || VM86(s))
3230 goto illegal_op;
3231 gen_ld_modrm(env, s, modrm, MO_16);
3232 gen_update_cc_op(s);
3233 if (op == 4) {
3234 gen_helper_verr(tcg_env, s->T0);
3235 } else {
3236 gen_helper_verw(tcg_env, s->T0);
3237 }
3238 assume_cc_op(s, CC_OP_EFLAGS);
3239 break;
3240 default:
3241 goto unknown_op;
3242 }
3243 break;
3244
3245 case 0x101:
3246 modrm = x86_ldub_code(env, s);
3247 switch (modrm) {
3248 CASE_MODRM_MEM_OP(0): /* sgdt */
3249 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3250 break;
3251 }
3252 gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3253 gen_lea_modrm(env, s, modrm);
3254 tcg_gen_ld32u_tl(s->T0,
3255 tcg_env, offsetof(CPUX86State, gdt.limit));
3256 gen_op_st_v(s, MO_16, s->T0, s->A0);
3257 gen_add_A0_im(s, 2);
3258 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3259 /*
3260 * NB: Despite a confusing description in Intel CPU documentation,
3261 * all 32-bits are written regardless of operand size.
3262 */
3263 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3264 break;
3265
3266 case 0xc8: /* monitor */
3267 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3268 goto illegal_op;
3269 }
3270 gen_update_cc_op(s);
3271 gen_update_eip_cur(s);
3272 gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3273 gen_helper_monitor(tcg_env, s->A0);
3274 break;
3275
3276 case 0xc9: /* mwait */
3277 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3278 goto illegal_op;
3279 }
3280 gen_update_cc_op(s);
3281 gen_update_eip_cur(s);
3282 gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3283 s->base.is_jmp = DISAS_NORETURN;
3284 break;
3285
3286 case 0xca: /* clac */
3287 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3288 || CPL(s) != 0) {
3289 goto illegal_op;
3290 }
3291 gen_reset_eflags(s, AC_MASK);
3292 s->base.is_jmp = DISAS_EOB_NEXT;
3293 break;
3294
3295 case 0xcb: /* stac */
3296 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3297 || CPL(s) != 0) {
3298 goto illegal_op;
3299 }
3300 gen_set_eflags(s, AC_MASK);
3301 s->base.is_jmp = DISAS_EOB_NEXT;
3302 break;
3303
3304 CASE_MODRM_MEM_OP(1): /* sidt */
3305 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3306 break;
3307 }
3308 gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3309 gen_lea_modrm(env, s, modrm);
3310 tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3311 gen_op_st_v(s, MO_16, s->T0, s->A0);
3312 gen_add_A0_im(s, 2);
3313 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3314 /*
3315 * NB: Despite a confusing description in Intel CPU documentation,
3316 * all 32-bits are written regardless of operand size.
3317 */
3318 gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3319 break;
3320
3321 case 0xd0: /* xgetbv */
3322 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3323 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3324 | PREFIX_REPZ | PREFIX_REPNZ))) {
3325 goto illegal_op;
3326 }
3327 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3328 gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3329 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3330 break;
3331
3332 case 0xd1: /* xsetbv */
3333 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3334 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3335 | PREFIX_REPZ | PREFIX_REPNZ))) {
3336 goto illegal_op;
3337 }
3338 gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3339 if (!check_cpl0(s)) {
3340 break;
3341 }
3342 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3343 cpu_regs[R_EDX]);
3344 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3345 gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3346 /* End TB because translation flags may change. */
3347 s->base.is_jmp = DISAS_EOB_NEXT;
3348 break;
3349
3350 case 0xd8: /* VMRUN */
3351 if (!SVME(s) || !PE(s)) {
3352 goto illegal_op;
3353 }
3354 if (!check_cpl0(s)) {
3355 break;
3356 }
3357 gen_update_cc_op(s);
3358 gen_update_eip_cur(s);
3359 /*
3360 * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
3361 * The usual gen_eob() handling is performed on vmexit after
3362 * host state is reloaded.
3363 */
3364 gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3365 cur_insn_len_i32(s));
3366 tcg_gen_exit_tb(NULL, 0);
3367 s->base.is_jmp = DISAS_NORETURN;
3368 break;
3369
3370 case 0xd9: /* VMMCALL */
3371 if (!SVME(s)) {
3372 goto illegal_op;
3373 }
3374 gen_update_cc_op(s);
3375 gen_update_eip_cur(s);
3376 gen_helper_vmmcall(tcg_env);
3377 break;
3378
3379 case 0xda: /* VMLOAD */
3380 if (!SVME(s) || !PE(s)) {
3381 goto illegal_op;
3382 }
3383 if (!check_cpl0(s)) {
3384 break;
3385 }
3386 gen_update_cc_op(s);
3387 gen_update_eip_cur(s);
3388 gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3389 break;
3390
3391 case 0xdb: /* VMSAVE */
3392 if (!SVME(s) || !PE(s)) {
3393 goto illegal_op;
3394 }
3395 if (!check_cpl0(s)) {
3396 break;
3397 }
3398 gen_update_cc_op(s);
3399 gen_update_eip_cur(s);
3400 gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3401 break;
3402
3403 case 0xdc: /* STGI */
3404 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3405 || !PE(s)) {
3406 goto illegal_op;
3407 }
3408 if (!check_cpl0(s)) {
3409 break;
3410 }
3411 gen_update_cc_op(s);
3412 gen_helper_stgi(tcg_env);
3413 s->base.is_jmp = DISAS_EOB_NEXT;
3414 break;
3415
3416 case 0xdd: /* CLGI */
3417 if (!SVME(s) || !PE(s)) {
3418 goto illegal_op;
3419 }
3420 if (!check_cpl0(s)) {
3421 break;
3422 }
3423 gen_update_cc_op(s);
3424 gen_update_eip_cur(s);
3425 gen_helper_clgi(tcg_env);
3426 break;
3427
3428 case 0xde: /* SKINIT */
3429 if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3430 || !PE(s)) {
3431 goto illegal_op;
3432 }
3433 gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3434 /* If not intercepted, not implemented -- raise #UD. */
3435 goto illegal_op;
3436
3437 case 0xdf: /* INVLPGA */
3438 if (!SVME(s) || !PE(s)) {
3439 goto illegal_op;
3440 }
3441 if (!check_cpl0(s)) {
3442 break;
3443 }
3444 gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
3445 if (s->aflag == MO_64) {
3446 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3447 } else {
3448 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3449 }
3450 gen_helper_flush_page(tcg_env, s->A0);
3451 s->base.is_jmp = DISAS_EOB_NEXT;
3452 break;
3453
3454 CASE_MODRM_MEM_OP(2): /* lgdt */
3455 if (!check_cpl0(s)) {
3456 break;
3457 }
3458 gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3459 gen_lea_modrm(env, s, modrm);
3460 gen_op_ld_v(s, MO_16, s->T1, s->A0);
3461 gen_add_A0_im(s, 2);
3462 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3463 if (dflag == MO_16) {
3464 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3465 }
3466 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3467 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3468 break;
3469
3470 CASE_MODRM_MEM_OP(3): /* lidt */
3471 if (!check_cpl0(s)) {
3472 break;
3473 }
3474 gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3475 gen_lea_modrm(env, s, modrm);
3476 gen_op_ld_v(s, MO_16, s->T1, s->A0);
3477 gen_add_A0_im(s, 2);
3478 gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3479 if (dflag == MO_16) {
3480 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3481 }
3482 tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3483 tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3484 break;
3485
3486 CASE_MODRM_OP(4): /* smsw */
3487 if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3488 break;
3489 }
3490 gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3491 tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3492 /*
3493 * In 32-bit mode, the higher 16 bits of the destination
3494 * register are undefined. In practice CR0[31:0] is stored
3495 * just like in 64-bit mode.
3496 */
3497 mod = (modrm >> 6) & 3;
3498 ot = (mod != 3 ? MO_16 : s->dflag);
3499 gen_st_modrm(env, s, modrm, ot);
3500 break;
3501 case 0xee: /* rdpkru */
3502 if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3503 | PREFIX_REPZ | PREFIX_REPNZ)) {
3504 goto illegal_op;
3505 }
3506 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3507 gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3508 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3509 break;
3510 case 0xef: /* wrpkru */
3511 if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3512 | PREFIX_REPZ | PREFIX_REPNZ)) {
3513 goto illegal_op;
3514 }
3515 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3516 cpu_regs[R_EDX]);
3517 tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3518 gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3519 break;
3520
3521 CASE_MODRM_OP(6): /* lmsw */
3522 if (!check_cpl0(s)) {
3523 break;
3524 }
3525 gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3526 gen_ld_modrm(env, s, modrm, MO_16);
3527 /*
3528 * Only the 4 lower bits of CR0 are modified.
3529 * PE cannot be set to zero if already set to one.
3530 */
3531 tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
3532 tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3533 tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
3534 tcg_gen_or_tl(s->T0, s->T0, s->T1);
3535 gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3536 s->base.is_jmp = DISAS_EOB_NEXT;
3537 break;
3538
3539 CASE_MODRM_MEM_OP(7): /* invlpg */
3540 if (!check_cpl0(s)) {
3541 break;
3542 }
3543 gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3544 gen_lea_modrm(env, s, modrm);
3545 gen_helper_flush_page(tcg_env, s->A0);
3546 s->base.is_jmp = DISAS_EOB_NEXT;
3547 break;
3548
3549 case 0xf8: /* swapgs */
3550 #ifdef TARGET_X86_64
3551 if (CODE64(s)) {
3552 if (check_cpl0(s)) {
3553 tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3554 tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3555 offsetof(CPUX86State, kernelgsbase));
3556 tcg_gen_st_tl(s->T0, tcg_env,
3557 offsetof(CPUX86State, kernelgsbase));
3558 }
3559 break;
3560 }
3561 #endif
3562 goto illegal_op;
3563
3564 case 0xf9: /* rdtscp */
3565 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3566 goto illegal_op;
3567 }
3568 gen_update_cc_op(s);
3569 gen_update_eip_cur(s);
3570 translator_io_start(&s->base);
3571 gen_helper_rdtsc(tcg_env);
3572 gen_helper_rdpid(s->T0, tcg_env);
3573 gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3574 break;
3575
3576 default:
3577 goto unknown_op;
3578 }
3579 break;
3580
3581 case 0x11a:
3582 modrm = x86_ldub_code(env, s);
3583 if (s->flags & HF_MPX_EN_MASK) {
3584 mod = (modrm >> 6) & 3;
3585 reg = ((modrm >> 3) & 7) | REX_R(s);
3586 if (prefixes & PREFIX_REPZ) {
3587 /* bndcl */
3588 if (reg >= 4
3589 || (prefixes & PREFIX_LOCK)
3590 || s->aflag == MO_16) {
3591 goto illegal_op;
3592 }
3593 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
3594 } else if (prefixes & PREFIX_REPNZ) {
3595 /* bndcu */
3596 if (reg >= 4
3597 || (prefixes & PREFIX_LOCK)
3598 || s->aflag == MO_16) {
3599 goto illegal_op;
3600 }
3601 TCGv_i64 notu = tcg_temp_new_i64();
3602 tcg_gen_not_i64(notu, cpu_bndu[reg]);
3603 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
3604 } else if (prefixes & PREFIX_DATA) {
3605 /* bndmov -- from reg/mem */
3606 if (reg >= 4 || s->aflag == MO_16) {
3607 goto illegal_op;
3608 }
3609 if (mod == 3) {
3610 int reg2 = (modrm & 7) | REX_B(s);
3611 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
3612 goto illegal_op;
3613 }
3614 if (s->flags & HF_MPX_IU_MASK) {
3615 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
3616 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
3617 }
3618 } else {
3619 gen_lea_modrm(env, s, modrm);
3620 if (CODE64(s)) {
3621 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3622 s->mem_index, MO_LEUQ);
3623 tcg_gen_addi_tl(s->A0, s->A0, 8);
3624 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3625 s->mem_index, MO_LEUQ);
3626 } else {
3627 tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
3628 s->mem_index, MO_LEUL);
3629 tcg_gen_addi_tl(s->A0, s->A0, 4);
3630 tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
3631 s->mem_index, MO_LEUL);
3632 }
3633 /* bnd registers are now in-use */
3634 gen_set_hflag(s, HF_MPX_IU_MASK);
3635 }
3636 } else if (mod != 3) {
3637 /* bndldx */
3638 AddressParts a = gen_lea_modrm_0(env, s, modrm);
3639 if (reg >= 4
3640 || (prefixes & PREFIX_LOCK)
3641 || s->aflag == MO_16
3642 || a.base < -1) {
3643 goto illegal_op;
3644 }
3645 if (a.base >= 0) {
3646 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3647 } else {
3648 tcg_gen_movi_tl(s->A0, 0);
3649 }
3650 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3651 if (a.index >= 0) {
3652 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3653 } else {
3654 tcg_gen_movi_tl(s->T0, 0);
3655 }
3656 if (CODE64(s)) {
3657 gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
3658 tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
3659 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
3660 } else {
3661 gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
3662 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
3663 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
3664 }
3665 gen_set_hflag(s, HF_MPX_IU_MASK);
3666 }
3667 }
3668 gen_nop_modrm(env, s, modrm);
3669 break;
3670 case 0x11b:
3671 modrm = x86_ldub_code(env, s);
3672 if (s->flags & HF_MPX_EN_MASK) {
3673 mod = (modrm >> 6) & 3;
3674 reg = ((modrm >> 3) & 7) | REX_R(s);
3675 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
3676 /* bndmk */
3677 if (reg >= 4
3678 || (prefixes & PREFIX_LOCK)
3679 || s->aflag == MO_16) {
3680 goto illegal_op;
3681 }
3682 AddressParts a = gen_lea_modrm_0(env, s, modrm);
3683 if (a.base >= 0) {
3684 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
3685 if (!CODE64(s)) {
3686 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
3687 }
3688 } else if (a.base == -1) {
3689 /* no base register has lower bound of 0 */
3690 tcg_gen_movi_i64(cpu_bndl[reg], 0);
3691 } else {
3692 /* rip-relative generates #ud */
3693 goto illegal_op;
3694 }
3695 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
3696 if (!CODE64(s)) {
3697 tcg_gen_ext32u_tl(s->A0, s->A0);
3698 }
3699 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
3700 /* bnd registers are now in-use */
3701 gen_set_hflag(s, HF_MPX_IU_MASK);
3702 break;
3703 } else if (prefixes & PREFIX_REPNZ) {
3704 /* bndcn */
3705 if (reg >= 4
3706 || (prefixes & PREFIX_LOCK)
3707 || s->aflag == MO_16) {
3708 goto illegal_op;
3709 }
3710 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
3711 } else if (prefixes & PREFIX_DATA) {
3712 /* bndmov -- to reg/mem */
3713 if (reg >= 4 || s->aflag == MO_16) {
3714 goto illegal_op;
3715 }
3716 if (mod == 3) {
3717 int reg2 = (modrm & 7) | REX_B(s);
3718 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
3719 goto illegal_op;
3720 }
3721 if (s->flags & HF_MPX_IU_MASK) {
3722 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
3723 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
3724 }
3725 } else {
3726 gen_lea_modrm(env, s, modrm);
3727 if (CODE64(s)) {
3728 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3729 s->mem_index, MO_LEUQ);
3730 tcg_gen_addi_tl(s->A0, s->A0, 8);
3731 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3732 s->mem_index, MO_LEUQ);
3733 } else {
3734 tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
3735 s->mem_index, MO_LEUL);
3736 tcg_gen_addi_tl(s->A0, s->A0, 4);
3737 tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
3738 s->mem_index, MO_LEUL);
3739 }
3740 }
3741 } else if (mod != 3) {
3742 /* bndstx */
3743 AddressParts a = gen_lea_modrm_0(env, s, modrm);
3744 if (reg >= 4
3745 || (prefixes & PREFIX_LOCK)
3746 || s->aflag == MO_16
3747 || a.base < -1) {
3748 goto illegal_op;
3749 }
3750 if (a.base >= 0) {
3751 tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
3752 } else {
3753 tcg_gen_movi_tl(s->A0, 0);
3754 }
3755 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3756 if (a.index >= 0) {
3757 tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
3758 } else {
3759 tcg_gen_movi_tl(s->T0, 0);
3760 }
3761 if (CODE64(s)) {
3762 gen_helper_bndstx64(tcg_env, s->A0, s->T0,
3763 cpu_bndl[reg], cpu_bndu[reg]);
3764 } else {
3765 gen_helper_bndstx32(tcg_env, s->A0, s->T0,
3766 cpu_bndl[reg], cpu_bndu[reg]);
3767 }
3768 }
3769 }
3770 gen_nop_modrm(env, s, modrm);
3771 break;
3772 default:
3773 g_assert_not_reached();
3774 }
3775 return;
3776 illegal_op:
3777 gen_illegal_opcode(s);
3778 return;
3779 unknown_op:
3780 gen_unknown_opcode(env, s);
3781 }
3782
3783 #include "decode-new.h"
3784 #include "emit.c.inc"
3785 #include "decode-new.c.inc"
3786
tcg_x86_init(void)3787 void tcg_x86_init(void)
3788 {
3789 static const char reg_names[CPU_NB_REGS][4] = {
3790 #ifdef TARGET_X86_64
3791 [R_EAX] = "rax",
3792 [R_EBX] = "rbx",
3793 [R_ECX] = "rcx",
3794 [R_EDX] = "rdx",
3795 [R_ESI] = "rsi",
3796 [R_EDI] = "rdi",
3797 [R_EBP] = "rbp",
3798 [R_ESP] = "rsp",
3799 [8] = "r8",
3800 [9] = "r9",
3801 [10] = "r10",
3802 [11] = "r11",
3803 [12] = "r12",
3804 [13] = "r13",
3805 [14] = "r14",
3806 [15] = "r15",
3807 #else
3808 [R_EAX] = "eax",
3809 [R_EBX] = "ebx",
3810 [R_ECX] = "ecx",
3811 [R_EDX] = "edx",
3812 [R_ESI] = "esi",
3813 [R_EDI] = "edi",
3814 [R_EBP] = "ebp",
3815 [R_ESP] = "esp",
3816 #endif
3817 };
3818 static const char eip_name[] = {
3819 #ifdef TARGET_X86_64
3820 "rip"
3821 #else
3822 "eip"
3823 #endif
3824 };
3825 static const char seg_base_names[6][8] = {
3826 [R_CS] = "cs_base",
3827 [R_DS] = "ds_base",
3828 [R_ES] = "es_base",
3829 [R_FS] = "fs_base",
3830 [R_GS] = "gs_base",
3831 [R_SS] = "ss_base",
3832 };
3833 static const char bnd_regl_names[4][8] = {
3834 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
3835 };
3836 static const char bnd_regu_names[4][8] = {
3837 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
3838 };
3839 int i;
3840
3841 cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
3842 offsetof(CPUX86State, cc_op), "cc_op");
3843 cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
3844 "cc_dst");
3845 cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
3846 "cc_src");
3847 cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
3848 "cc_src2");
3849 cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
3850
3851 for (i = 0; i < CPU_NB_REGS; ++i) {
3852 cpu_regs[i] = tcg_global_mem_new(tcg_env,
3853 offsetof(CPUX86State, regs[i]),
3854 reg_names[i]);
3855 }
3856
3857 for (i = 0; i < 6; ++i) {
3858 cpu_seg_base[i]
3859 = tcg_global_mem_new(tcg_env,
3860 offsetof(CPUX86State, segs[i].base),
3861 seg_base_names[i]);
3862 }
3863
3864 for (i = 0; i < 4; ++i) {
3865 cpu_bndl[i]
3866 = tcg_global_mem_new_i64(tcg_env,
3867 offsetof(CPUX86State, bnd_regs[i].lb),
3868 bnd_regl_names[i]);
3869 cpu_bndu[i]
3870 = tcg_global_mem_new_i64(tcg_env,
3871 offsetof(CPUX86State, bnd_regs[i].ub),
3872 bnd_regu_names[i]);
3873 }
3874 }
3875
/*
 * Set up the per-TB DisasContext from the TB flags and the CPUID
 * feature words cached in CPUX86State.
 */
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cur_cpl = (flags >> HF_CPL_SHIFT) & 3;
    int cur_iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cur_cpl;
    dc->iopl = cur_iopl;
#endif

    /*
     * The translator relies on several simplifying assumptions about
     * how mode bits are encoded in the TB flags; check them all here.
     */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cur_cpl);
    g_assert(IOPL(dc) == cur_iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;

    /* Select the MMU index used for this TB's memory accesses. */
    dc->mem_index = cpu_mmu_index(cpu, false);

    /* Cache the CPUID feature words consulted while decoding. */
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];

    /*
     * Direct jump optimization is disabled when goto_tb is forbidden or
     * when RF/TF/INHIBIT_IRQ force single-instruction handling.
     */
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     *
     * FIXME: this is messy; it makes REP string instructions a lot less
     * efficient than they should be and it gets in the way of correct
     * handling of RF (interrupts or traps arriving after any iteration
     * of a repeated string instruction but the last should set RF to 1).
     * Perhaps it would be more efficient if REP string instructions were
     * always at the beginning of the TB, or even their own TB?  That
     * would even allow accounting up to 64k iterations at once for icount.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    /* Scratch temporaries shared by all instructions in the TB. */
    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();
    dc->tmp0 = tcg_temp_new();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
}
3946
/* Per-TB start hook: x86 needs no TB prologue, so this is a no-op. */
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
3950
/*
 * Per-instruction start hook: record the rollback point for this insn
 * and emit the insn_start marker carrying the PC and current cc_op.
 */
static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc = dc->base.pc_next;

    /*
     * Remember where this insn begins in the op stream so that an
     * abandoned decode can remove the partially emitted ops.
     */
    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();

    /* For PC-relative TBs only the offset within the page is recorded. */
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc, dc->cc_op);
}
3963
/*
 * Translate a single guest instruction.  disas_insn() may bail out via
 * siglongjmp(dc->jmpbuf, ...): value 1 raises #GP (instruction too long),
 * value 2 abandons the insn entirely, rolls back the partially emitted
 * ops and ends the TB so the insn is retried at the start of a new TB.
 */
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    /* Snapshot state that case 2 below must restore after a longjmp. */
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        /* Normal path: decode and emit ops for one instruction. */
        disas_insn(dc, cpu);
        break;
    case 1:
        /* Decoder longjmp'd with 1: raise #GP. */
        gen_exception_gpf(dc);
        break;
    case 2:
        /* Decoder longjmp'd with 2: back out of this insn completely. */
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        dc->cc_op_dirty = orig_cc_op_dirty;
        dc->cc_op = orig_cc_op;
        dc->pc_save = orig_pc_save;
        /* END TODO */
        /* Discard the ops emitted for the abandoned insn. */
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte boundary was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * If single step mode, we generate only one instruction and
             * generate an exception.
             * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
             * the flag and abort the translation to give the irqs a
             * chance to happen.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
            /* The next insn starts on another page: end the TB here. */
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
4030
/*
 * Emit the end-of-TB code appropriate for the reason translation
 * stopped, as recorded in dc->base.is_jmp.
 */
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /*
         * Most instructions should not use DISAS_NORETURN, as that suppresses
         * the handling of hflags normally done by gen_eob().  We can
         * get here:
         * - for exception and interrupts
         * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
         * - for VMRUN because RF/TF handling for the host is done after vmexit,
         *   and INHIBIT_IRQ is loaded from the VMCB
         * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
         *   the helpers handle themselves the tasks normally done by gen_eob().
         */
        break;

    case DISAS_TOO_MANY:
        /* Fell off the end of the TB: chain straight to the next insn. */
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;

    case DISAS_EOB_NEXT:
    case DISAS_EOB_INHIBIT_IRQ:
        /* EIP must be written back before ending the TB. */
        assert(dc->base.pc_next == dc->pc);
        gen_update_eip_cur(dc);
        gen_eob(dc, dc->base.is_jmp);
        break;

    case DISAS_EOB_ONLY:
    case DISAS_EOB_RECHECK_TF:
    case DISAS_JUMP:
        /* EIP has already been updated by the instruction itself. */
        gen_eob(dc, dc->base.is_jmp);
        break;

    default:
        g_assert_not_reached();
    }
}
4067
/* Callback table wiring the x86 front end into the generic translator loop. */
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
};
4075
/*
 * Generate intermediate code for basic block 'tb': entry point called by
 * the common translator core; drives the loop through i386_tr_ops.
 */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}
4084