/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Fixes for Windows namespace pollution.  */
#undef IN
#undef OUT

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
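/*
 * For example, CASE_MODRM_MEM_OP(7) expands to the case ranges
 * 0x38 ... 0x3f, 0x78 ... 0x7f and 0xb8 ... 0xbf: every modrm byte with
 * reg == 7 and mod == 0, 1 or 2, i.e. a memory operand.  CASE_MODRM_OP(7)
 * additionally matches 0xf8 ... 0xff (mod == 3, a register operand).
 */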

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;

#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper functions.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_exception_gpf(DisasContext *s);

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
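/*
 * Example of how set_cc_op() below uses this table: switching from
 * CC_OP_SUBB (which uses DST, SRC and SRCT) to CC_OP_LOGICB (which uses
 * only DST) leaves SRC and SRCT dead, so cpu_cc_src and s->cc_srcT can
 * be discarded instead of being kept alive across the change.
 */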

static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
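/*
 * For example, byte register number 7 means BH (bits 15..8 of EBX) in
 * legacy encodings, but DIL (the low byte of RDI) whenever any REX
 * prefix is present in 64-bit mode.
 */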

/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}
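/*
 * For example, opcodes 0x88 (MOV Eb,Gb) and 0x89 (MOV Ev,Gv) differ
 * only in bit 0: mo_b_d(0x88, ot) yields MO_8, while mo_b_d(0x89, ot)
 * yields the operand size OT selected by prefixes and mode.
 */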

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}

/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}
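/*
 * Note that the MO_16 case truncates the effective address modulo 64K
 * before adding the segment base: e.g. "mov al, [bx+si]" with
 * BX + SI = 0x10002 accesses offset 0x0002 within the segment, matching
 * real-mode address wraparound.
 */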

static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
};

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
    } else {
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
                             .imm = 1ull << ((8 << size) - 1) };
    }
}
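/*
 * For example, with SIZE == MO_8 this yields "TSTNE src, 0x80", i.e. a
 * test of the sign bit of the low byte; for the full-width MO_TL case a
 * plain signed "LT src, 0" comparison is enough.
 */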

/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       if (!reg) {
           reg = tcg_temp_new();
       }
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .no_setcond = true };
    }
}

/* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}

/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            return gen_prepare_sign_nz(cpu_cc_dst, size);
        }
    }
}

/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}

/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            if (size == MO_TL) {
                return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
            } else {
                return (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = cpu_cc_dst,
                                     .imm = (1ull << (8 << size)) - 1 };
            }
        }
    }
}

/* return how to compute jump opcode 'b'.  'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
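/*
 * The B argument follows the x86 condition-code encoding: bit 0 inverts
 * the test and bits 3..1 select the JCC_* operation.  For example,
 * b == 5 is (JCC_Z << 1) | 1, i.e. "not zero", the condition of JNZ.
 */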

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.  The caller can jump to the returned label
   to stop the REP but, if the flags have changed, it has to call
   gen_update_cc_op before doing so.  */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_update_cc_op(s);
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /*
     * Only one iteration is done at a time, so the translation
     * block ends unconditionally after this instruction and there
     * is no control flow junction - no need to set CC_OP_DYNAMIC.
     */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 is defined, fall through into the MO_32 case;
         * otherwise fall through to the default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}

#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            (void)translator_ldub(env, &s->base,
                                  (s->pc - 1) & TARGET_PAGE_MASK);
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
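/*
 * For example, an over-long instruction whose first byte lies near the
 * end of a page has its 16th byte on the following page: the dummy
 * translator_ldub() above touches that page first, so a #PF there wins
 * over the #GP raised via siglongjmp(s->jmpbuf, 1) for exceeding
 * X86_MAX_INSN_LENGTH.
 */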

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif

/* Decompose an address.  */

typedef struct AddressParts {
    int def_seg;
    int base;
    int index;
    int scale;
    target_long disp;
} AddressParts;
1621 
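/*
 * Decode the ModRM byte (plus any SIB byte and displacement) into
 * AddressParts without emitting TCG ops.  A base or index of -1 means
 * "absent"; base == -2 marks a 64-bit RIP-relative address, which
 * gen_lea_modrm_1() resolves against cpu_eip.
 */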
1622 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1623                                     int modrm)
1624 {
1625     int def_seg, base, index, scale, mod, rm;
1626     target_long disp;
1627     bool havesib;
1628 
1629     def_seg = R_DS;
1630     index = -1;
1631     scale = 0;
1632     disp = 0;
1633 
1634     mod = (modrm >> 6) & 3;
1635     rm = modrm & 7;
1636     base = rm | REX_B(s);
1637 
1638     if (mod == 3) {
1639         /* Normally filtered out earlier, but including this path
1640            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
1641         goto done;
1642     }
1643 
1644     switch (s->aflag) {
1645     case MO_64:
1646     case MO_32:
1647         havesib = 0;
1648         if (rm == 4) {
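            /* rm == 4 escapes to a SIB byte; index 4 (no REX.X) means "no index". */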
1649             int code = x86_ldub_code(env, s);
1650             scale = (code >> 6) & 3;
1651             index = ((code >> 3) & 7) | REX_X(s);
1652             if (index == 4) {
1653                 index = -1;  /* no index */
1654             }
1655             base = (code & 7) | REX_B(s);
1656             havesib = 1;
1657         }
1658 
1659         switch (mod) {
1660         case 0:
1661             if ((base & 7) == 5) {
1662                 base = -1;
1663                 disp = (int32_t)x86_ldl_code(env, s);
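                /* In 64-bit mode, mod=0 rm=101 without a SIB byte is RIP-relative. */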
1664                 if (CODE64(s) && !havesib) {
1665                     base = -2;
1666                     disp += s->pc + s->rip_offset;
1667                 }
1668             }
1669             break;
1670         case 1:
1671             disp = (int8_t)x86_ldub_code(env, s);
1672             break;
1673         default:
1674         case 2:
1675             disp = (int32_t)x86_ldl_code(env, s);
1676             break;
1677         }
1678 
1679         /* For correct popl handling with esp.  */
1680         if (base == R_ESP && s->popl_esp_hack) {
1681             disp += s->popl_esp_hack;
1682         }
1683         if (base == R_EBP || base == R_ESP) {
1684             def_seg = R_SS;
1685         }
1686         break;
1687 
1688     case MO_16:
1689         if (mod == 0) {
1690             if (rm == 6) {
1691                 base = -1;
1692                 disp = x86_lduw_code(env, s);
1693                 break;
1694             }
1695         } else if (mod == 1) {
1696             disp = (int8_t)x86_ldub_code(env, s);
1697         } else {
1698             disp = (int16_t)x86_lduw_code(env, s);
1699         }
1700 
1701         switch (rm) {
1702         case 0:
1703             base = R_EBX;
1704             index = R_ESI;
1705             break;
1706         case 1:
1707             base = R_EBX;
1708             index = R_EDI;
1709             break;
1710         case 2:
1711             base = R_EBP;
1712             index = R_ESI;
1713             def_seg = R_SS;
1714             break;
1715         case 3:
1716             base = R_EBP;
1717             index = R_EDI;
1718             def_seg = R_SS;
1719             break;
1720         case 4:
1721             base = R_ESI;
1722             break;
1723         case 5:
1724             base = R_EDI;
1725             break;
1726         case 6:
1727             base = R_EBP;
1728             def_seg = R_SS;
1729             break;
1730         default:
1731         case 7:
1732             base = R_EBX;
1733             break;
1734         }
1735         break;
1736 
1737     default:
1738         g_assert_not_reached();
1739     }
1740 
1741  done:
1742     return (AddressParts){ def_seg, base, index, scale, disp };
1743 }
1744 
1745 /* Compute the address, with a minimum number of TCG ops.  */
1746 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1747 {
1748     TCGv ea = NULL;
1749 
1750     if (a.index >= 0 && !is_vsib) {
1751         if (a.scale == 0) {
1752             ea = cpu_regs[a.index];
1753         } else {
1754             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1755             ea = s->A0;
1756         }
1757         if (a.base >= 0) {
1758             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1759             ea = s->A0;
1760         }
1761     } else if (a.base >= 0) {
1762         ea = cpu_regs[a.base];
1763     }
1764     if (!ea) {
1765         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1766             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1767             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1768         } else {
1769             tcg_gen_movi_tl(s->A0, a.disp);
1770         }
1771         ea = s->A0;
1772     } else if (a.disp != 0) {
1773         tcg_gen_addi_tl(s->A0, ea, a.disp);
1774         ea = s->A0;
1775     }
1776 
1777     return ea;
1778 }
1779 
1780 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1781 {
1782     AddressParts a = gen_lea_modrm_0(env, s, modrm);
1783     TCGv ea = gen_lea_modrm_1(s, a, false);
1784     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
1785 }
1786 
1787 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
1788 {
1789     (void)gen_lea_modrm_0(env, s, modrm);
1790 }
1791 
1792 /* Used for BNDCL, BNDCU, BNDCN.  */
1793 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
1794                       TCGCond cond, TCGv_i64 bndv)
1795 {
1796     AddressParts a = gen_lea_modrm_0(env, s, modrm);
1797     TCGv ea = gen_lea_modrm_1(s, a, false);
1798 
1799     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1800     if (!CODE64(s)) {
1801         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1802     }
1803     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1804     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1805     gen_helper_bndck(tcg_env, s->tmp2_i32);
1806 }
1807 
1808 /* used for LEA and MOV AX, mem */
1809 static void gen_add_A0_ds_seg(DisasContext *s)
1810 {
1811     gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
1812 }
1813 
1814 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
1815    OR_TMP0 */
1816 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
1817                            MemOp ot, int reg, int is_store)
1818 {
1819     int mod, rm;
1820 
1821     mod = (modrm >> 6) & 3;
1822     rm = (modrm & 7) | REX_B(s);
1823     if (mod == 3) {
1824         if (is_store) {
1825             if (reg != OR_TMP0)
1826                 gen_op_mov_v_reg(s, ot, s->T0, reg);
1827             gen_op_mov_reg_v(s, ot, rm, s->T0);
1828         } else {
1829             gen_op_mov_v_reg(s, ot, s->T0, rm);
1830             if (reg != OR_TMP0)
1831                 gen_op_mov_reg_v(s, ot, reg, s->T0);
1832         }
1833     } else {
1834         gen_lea_modrm(env, s, modrm);
1835         if (is_store) {
1836             if (reg != OR_TMP0)
1837                 gen_op_mov_v_reg(s, ot, s->T0, reg);
1838             gen_op_st_v(s, ot, s->T0, s->A0);
1839         } else {
1840             gen_op_ld_v(s, ot, s->T0, s->A0);
1841             if (reg != OR_TMP0)
1842                 gen_op_mov_reg_v(s, ot, reg, s->T0);
1843         }
1844     }
1845 }
1846 
1847 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1848 {
1849     target_ulong ret;
1850 
1851     switch (ot) {
1852     case MO_8:
1853         ret = x86_ldub_code(env, s);
1854         break;
1855     case MO_16:
1856         ret = x86_lduw_code(env, s);
1857         break;
1858     case MO_32:
1859         ret = x86_ldl_code(env, s);
1860         break;
1861 #ifdef TARGET_X86_64
1862     case MO_64:
1863         ret = x86_ldq_code(env, s);
1864         break;
1865 #endif
1866     default:
1867         g_assert_not_reached();
1868     }
1869     return ret;
1870 }
1871 
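/*
 * Fetch an immediate.  Note that MO_64 is folded into a 32-bit fetch
 * here; callers needing a full 64-bit value use insn_get_addr() or
 * x86_ldq_code() instead.
 */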
1872 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1873 {
1874     uint32_t ret;
1875 
1876     switch (ot) {
1877     case MO_8:
1878         ret = x86_ldub_code(env, s);
1879         break;
1880     case MO_16:
1881         ret = x86_lduw_code(env, s);
1882         break;
1883     case MO_32:
1884 #ifdef TARGET_X86_64
1885     case MO_64:
1886 #endif
1887         ret = x86_ldl_code(env, s);
1888         break;
1889     default:
1890         g_assert_not_reached();
1891     }
1892     return ret;
1893 }
1894 
1895 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1896 {
1897     target_long ret;
1898 
1899     switch (ot) {
1900     case MO_8:
1901         ret = (int8_t) x86_ldub_code(env, s);
1902         break;
1903     case MO_16:
1904         ret = (int16_t) x86_lduw_code(env, s);
1905         break;
1906     case MO_32:
1907         ret = (int32_t) x86_ldl_code(env, s);
1908         break;
1909 #ifdef TARGET_X86_64
1910     case MO_64:
1911         ret = x86_ldq_code(env, s);
1912         break;
1913 #endif
1914     default:
1915         g_assert_not_reached();
1916     }
1917     return ret;
1918 }
1919 
1920 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1921                                         TCGLabel *not_taken, TCGLabel *taken)
1922 {
1923     if (not_taken) {
1924         gen_set_label(not_taken);
1925     }
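    /* Not-taken path: jump to the next instruction, using TB exit slot 1. */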
1926     gen_jmp_rel_csize(s, 0, 1);
1927 
1928     gen_set_label(taken);
1929     gen_jmp_rel(s, s->dflag, diff, 0);
1930 }
1931 
1932 static void gen_jcc(DisasContext *s, int b, int diff)
1933 {
1934     TCGLabel *l1 = gen_new_label();
1935 
1936     gen_jcc1(s, b, l1);
1937     gen_conditional_jump_labels(s, diff, NULL, l1);
1938 }
1939 
1940 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
1941 {
1942     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1943 
1944     if (!cc.use_reg2) {
1945         cc.reg2 = tcg_constant_tl(cc.imm);
1946     }
1947 
1948     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1949 }
1950 
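/* Real/VM86-mode segment load: the base is simply selector << 4. */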
1951 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1952 {
1953     TCGv selector = tcg_temp_new();
1954     tcg_gen_ext16u_tl(selector, seg);
1955     tcg_gen_st32_tl(selector, tcg_env,
1956                     offsetof(CPUX86State,segs[seg_reg].selector));
1957     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1958 }
1959 
1960 /* Move SRC to seg_reg, ending the TB when the load may change CPU state.
1961    Never call this function with seg_reg == R_CS. */
1962 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
1963 {
1964     if (PE(s) && !VM86(s)) {
1965         tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
1966         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
1967         /* abort translation because the addseg value may change or
1968            because ss32 may change. For R_SS, translation must always
1969            stop as a special handling must be done to disable hardware
1970            interrupts for the next instruction */
1971         if (seg_reg == R_SS) {
1972             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1973         } else if (CODE32(s) && seg_reg < R_FS) {
1974             s->base.is_jmp = DISAS_EOB_NEXT;
1975         }
1976     } else {
1977         gen_op_movl_seg_real(s, seg_reg, src);
1978         if (seg_reg == R_SS) {
1979             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1980         }
1981     }
1982 }
1983 
1984 static void gen_far_call(DisasContext *s)
1985 {
1986     TCGv_i32 new_cs = tcg_temp_new_i32();
1987     tcg_gen_trunc_tl_i32(new_cs, s->T1);
1988     if (PE(s) && !VM86(s)) {
1989         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
1990                                    tcg_constant_i32(s->dflag - 1),
1991                                    eip_next_tl(s));
1992     } else {
1993         TCGv_i32 new_eip = tcg_temp_new_i32();
1994         tcg_gen_trunc_tl_i32(new_eip, s->T0);
1995         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
1996                               tcg_constant_i32(s->dflag - 1),
1997                               eip_next_i32(s));
1998     }
1999     s->base.is_jmp = DISAS_JUMP;
2000 }
2001 
2002 static void gen_far_jmp(DisasContext *s)
2003 {
2004     if (PE(s) && !VM86(s)) {
2005         TCGv_i32 new_cs = tcg_temp_new_i32();
2006         tcg_gen_trunc_tl_i32(new_cs, s->T1);
2007         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
2008                                   eip_next_tl(s));
2009     } else {
2010         gen_op_movl_seg_real(s, R_CS, s->T1);
2011         gen_op_jmp_v(s, s->T0);
2012     }
2013     s->base.is_jmp = DISAS_JUMP;
2014 }
2015 
2016 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2017 {
2018     /* Fast path: SVM is not active, so no intercept check is needed. */
2019     if (likely(!GUEST(s))) {
2020         return;
2021     }
2022     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2023 }
2024 
2025 static inline void gen_stack_update(DisasContext *s, int addend)
2026 {
2027     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2028 }
2029 
2030 /* Generate a push. It depends on ss32, addseg and dflag.  */
2031 static void gen_push_v(DisasContext *s, TCGv val)
2032 {
2033     MemOp d_ot = mo_pushpop(s, s->dflag);
2034     MemOp a_ot = mo_stacksize(s);
2035     int size = 1 << d_ot;
2036     TCGv new_esp = s->A0;
2037 
2038     tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2039 
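    /*
     * Outside 64-bit mode, gen_lea_v_seg() below may add the SS base to
     * s->A0; keep a copy of the plain decremented ESP so that the
     * write-back at the end stores the offset, not the linear address.
     */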
2040     if (!CODE64(s)) {
2041         if (ADDSEG(s)) {
2042             new_esp = tcg_temp_new();
2043             tcg_gen_mov_tl(new_esp, s->A0);
2044         }
2045         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2046     }
2047 
2048     gen_op_st_v(s, d_ot, val, s->A0);
2049     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2050 }
2051 
2052 /* A two-step pop (load first, update ESP separately) keeps exceptions
   precise: a faulting load leaves ESP intact. */
2053 static MemOp gen_pop_T0(DisasContext *s)
2054 {
2055     MemOp d_ot = mo_pushpop(s, s->dflag);
2056 
2057     gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
2058     gen_op_ld_v(s, d_ot, s->T0, s->T0);
2059 
2060     return d_ot;
2061 }
2062 
2063 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2064 {
2065     gen_stack_update(s, 1 << ot);
2066 }
2067 
2068 static inline void gen_stack_A0(DisasContext *s)
2069 {
2070     gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2071 }
2072 
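/*
 * PUSHA stores the eight GPRs below the old ESP, EAX at the highest
 * address and EDI at the lowest (cpu_regs[7 - i] walks EDI..EAX as the
 * offset rises toward ESP), then adjusts ESP once at the end.
 */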
2073 static void gen_pusha(DisasContext *s)
2074 {
2075     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2076     MemOp d_ot = s->dflag;
2077     int size = 1 << d_ot;
2078     int i;
2079 
2080     for (i = 0; i < 8; i++) {
2081         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2082         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2083         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2084     }
2085 
2086     gen_stack_update(s, -8 * size);
2087 }
2088 
2089 static void gen_popa(DisasContext *s)
2090 {
2091     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2092     MemOp d_ot = s->dflag;
2093     int size = 1 << d_ot;
2094     int i;
2095 
2096     for (i = 0; i < 8; i++) {
2097         /* ESP is not reloaded */
2098         if (7 - i == R_ESP) {
2099             continue;
2100         }
2101         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2102         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2103         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2104         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2105     }
2106 
2107     gen_stack_update(s, 8 * size);
2108 }
2109 
2110 static void gen_enter(DisasContext *s, int esp_addend, int level)
2111 {
2112     MemOp d_ot = mo_pushpop(s, s->dflag);
2113     MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2114     int size = 1 << d_ot;
2115 
2116     /* Push BP; compute FrameTemp into T1.  */
2117     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2118     gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2119     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2120 
2121     level &= 31;
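    /* The architectural ENTER nesting level is only 5 bits wide. */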
2122     if (level != 0) {
2123         int i;
2124 
2125         /* Copy level-1 pointers from the previous frame.  */
2126         for (i = 1; i < level; ++i) {
2127             tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2128             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2129             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2130 
2131             tcg_gen_subi_tl(s->A0, s->T1, size * i);
2132             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2133             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2134         }
2135 
2136         /* Push the current FrameTemp as the last level.  */
2137         tcg_gen_subi_tl(s->A0, s->T1, size * level);
2138         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2139         gen_op_st_v(s, d_ot, s->T1, s->A0);
2140     }
2141 
2142     /* Copy the FrameTemp value to EBP.  */
2143     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2144 
2145     /* Compute the final value of ESP.  */
2146     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2147     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2148 }
2149 
2150 static void gen_leave(DisasContext *s)
2151 {
2152     MemOp d_ot = mo_pushpop(s, s->dflag);
2153     MemOp a_ot = mo_stacksize(s);
2154 
2155     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2156     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2157 
2158     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2159 
2160     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2161     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2162 }
2163 
2164 /* Similarly, except that the assumption here is that we don't decode
2165    the instruction at all -- either a missing opcode, an unimplemented
2166    feature, or just a bogus instruction stream.  */
2167 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2168 {
2169     gen_illegal_opcode(s);
2170 
2171     if (qemu_loglevel_mask(LOG_UNIMP)) {
2172         FILE *logfile = qemu_log_trylock();
2173         if (logfile) {
2174             target_ulong pc = s->base.pc_next, end = s->pc;
2175 
2176             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2177             for (; pc < end; ++pc) {
2178                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2179             }
2180             fprintf(logfile, "\n");
2181             qemu_log_unlock(logfile);
2182         }
2183     }
2184 }
2185 
2186 /* an interrupt is different from an exception because of the
2187    privilege checks */
2188 static void gen_interrupt(DisasContext *s, uint8_t intno)
2189 {
2190     gen_update_cc_op(s);
2191     gen_update_eip_cur(s);
2192     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2193                                cur_insn_len_i32(s));
2194     s->base.is_jmp = DISAS_NORETURN;
2195 }
2196 
2197 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2198 {
2199     if ((s->flags & mask) == 0) {
2200         TCGv_i32 t = tcg_temp_new_i32();
2201         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2202         tcg_gen_ori_i32(t, t, mask);
2203         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2204         s->flags |= mask;
2205     }
2206 }
2207 
2208 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2209 {
2210     if (s->flags & mask) {
2211         TCGv_i32 t = tcg_temp_new_i32();
2212         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2213         tcg_gen_andi_i32(t, t, ~mask);
2214         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2215         s->flags &= ~mask;
2216     }
2217 }
2218 
2219 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2220 {
2221     TCGv t = tcg_temp_new();
2222 
2223     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2224     tcg_gen_ori_tl(t, t, mask);
2225     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2226 }
2227 
2228 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2229 {
2230     TCGv t = tcg_temp_new();
2231 
2232     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2233     tcg_gen_andi_tl(t, t, ~mask);
2234     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2235 }
2236 
2237 /* Clear BND registers during legacy branches.  */
2238 static void gen_bnd_jmp(DisasContext *s)
2239 {
2240     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2241        and if the BNDREGs are known to be in use (non-zero) already.
2242        The helper itself will check BNDPRESERVE at runtime.  */
2243     if ((s->prefix & PREFIX_REPNZ) == 0
2244         && (s->flags & HF_MPX_EN_MASK) != 0
2245         && (s->flags & HF_MPX_IU_MASK) != 0) {
2246         gen_helper_bnd_jmp(tcg_env);
2247     }
2248 }
2249 
2250 /* Generate an end of block. Trace exception is also generated if needed.
2251    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2252    If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2253    S->TF.  This is used by the syscall/sysret insns.  If JR, chain to the
   next TB with a goto_ptr lookup, unless IRQ inhibition was just cleared. */
2254 static void
2255 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2256 {
2257     bool inhibit_reset;
2258 
2259     gen_update_cc_op(s);
2260 
2261     /* If several instructions disable interrupts, only the first does it.  */
2262     inhibit_reset = false;
2263     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2264         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2265         inhibit_reset = true;
2266     } else if (inhibit) {
2267         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2268     }
2269 
2270     if (s->base.tb->flags & HF_RF_MASK) {
2271         gen_reset_eflags(s, RF_MASK);
2272     }
2273     if (recheck_tf) {
2274         gen_helper_rechecking_single_step(tcg_env);
2275         tcg_gen_exit_tb(NULL, 0);
2276     } else if (s->flags & HF_TF_MASK) {
2277         gen_helper_single_step(tcg_env);
2278     } else if (jr &&
2279                /* give irqs a chance to happen */
2280                !inhibit_reset) {
2281         tcg_gen_lookup_and_goto_ptr();
2282     } else {
2283         tcg_gen_exit_tb(NULL, 0);
2284     }
2285     s->base.is_jmp = DISAS_NORETURN;
2286 }
2287 
2288 static inline void
2289 gen_eob_syscall(DisasContext *s)
2290 {
2291     gen_eob_worker(s, false, true, false);
2292 }
2293 
2294 /* End of block.  Set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
2295 static void gen_eob_inhibit_irq(DisasContext *s)
2296 {
2297     gen_eob_worker(s, true, false, false);
2298 }
2299 
2300 /* End of block, resetting the inhibit irq flag.  */
2301 static void gen_eob(DisasContext *s)
2302 {
2303     gen_eob_worker(s, false, false, false);
2304 }
2305 
2306 /* Jump to register */
2307 static void gen_jr(DisasContext *s)
2308 {
2309     gen_eob_worker(s, false, false, true);
2310 }
2311 
2312 /* Jump to eip+diff, truncating the result to OT. */
2313 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2314 {
2315     bool use_goto_tb = s->jmp_opt;
2316     target_ulong mask = -1;
2317     target_ulong new_pc = s->pc + diff;
2318     target_ulong new_eip = new_pc - s->cs_base;
2319 
2320     assert(!s->cc_op_dirty);
2321 
2322     /* In 64-bit mode, operand size is fixed at 64 bits. */
2323     if (!CODE64(s)) {
2324         if (ot == MO_16) {
2325             mask = 0xffff;
2326             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2327                 use_goto_tb = false;
2328             }
2329         } else {
2330             mask = 0xffffffff;
2331         }
2332     }
2333     new_eip &= mask;
2334 
2335     if (tb_cflags(s->base.tb) & CF_PCREL) {
2336         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2337         /*
2338          * If we can prove the branch does not leave the page and we have
2339          * no extra masking to apply (data16 branch in code32, see above),
2340          * then we have also proven that the addition does not wrap.
2341          */
2342         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2343             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2344             use_goto_tb = false;
2345         }
2346     } else if (!CODE64(s)) {
2347         new_pc = (uint32_t)(new_eip + s->cs_base);
2348     }
2349 
2350     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2351         /* jump to same page: we can use a direct jump */
2352         tcg_gen_goto_tb(tb_num);
2353         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2354             tcg_gen_movi_tl(cpu_eip, new_eip);
2355         }
2356         tcg_gen_exit_tb(s->base.tb, tb_num);
2357         s->base.is_jmp = DISAS_NORETURN;
2358     } else {
2359         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2360             tcg_gen_movi_tl(cpu_eip, new_eip);
2361         }
2362         if (s->jmp_opt) {
2363             gen_jr(s);   /* jump to another page */
2364         } else {
2365             gen_eob(s);  /* exit to main loop */
2366         }
2367     }
2368 }
2369 
2370 /* Jump to eip+diff, truncating to the current code size. */
2371 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2372 {
2373     /* CODE64 ignores the OT argument, so we need not consider it. */
2374     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2375 }
2376 
2377 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2378 {
2379     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2380     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2381 }
2382 
2383 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2384 {
2385     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2386     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2387 }
2388 
2389 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2390 {
2391     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2392                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2393     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2394     int mem_index = s->mem_index;
2395     TCGv_i128 t = tcg_temp_new_i128();
2396 
2397     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2398     tcg_gen_st_i128(t, tcg_env, offset);
2399 }
2400 
2401 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2402 {
2403     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2404                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2405     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2406     int mem_index = s->mem_index;
2407     TCGv_i128 t = tcg_temp_new_i128();
2408 
2409     tcg_gen_ld_i128(t, tcg_env, offset);
2410     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2411 }
2412 
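/*
 * 256-bit (YMM) accesses are split into two 16-byte halves; when an
 * alignment check is requested, it is applied to the first half only.
 */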
2413 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2414 {
2415     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2416     int mem_index = s->mem_index;
2417     TCGv_i128 t0 = tcg_temp_new_i128();
2418     TCGv_i128 t1 = tcg_temp_new_i128();
2419 
2420     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2421     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2422     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2423 
2424     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2425     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2426 }
2427 
2428 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2429 {
2430     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2431     int mem_index = s->mem_index;
2432     TCGv_i128 t = tcg_temp_new_i128();
2433 
2434     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2435     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2436     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2437     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2438     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2439 }
2440 
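/*
 * CMPXCHG8B: compare EDX:EAX against the 64-bit memory operand; if
 * equal, store ECX:EBX and set ZF, otherwise load the operand into
 * EDX:EAX and clear ZF.
 */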
2441 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2442 {
2443     TCGv_i64 cmp, val, old;
2444     TCGv Z;
2445 
2446     gen_lea_modrm(env, s, modrm);
2447 
2448     cmp = tcg_temp_new_i64();
2449     val = tcg_temp_new_i64();
2450     old = tcg_temp_new_i64();
2451 
2452     /* Construct the comparison values from the register pair. */
2453     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2454     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2455 
2456     /* Only require atomic with LOCK; non-parallel handled in generator. */
2457     if (s->prefix & PREFIX_LOCK) {
2458         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2459     } else {
2460         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
2461                                       s->mem_index, MO_TEUQ);
2462     }
2463 
2464     /* Set tmp0 to match the required value of Z. */
2465     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
2466     Z = tcg_temp_new();
2467     tcg_gen_trunc_i64_tl(Z, cmp);
2468 
2469     /*
2470      * Extract the result values for the register pair.
2471      * For 32-bit, we may do this unconditionally, because on success (Z=1),
2472      * the old value matches the previous value in EDX:EAX.  For x86_64,
2473      * the store must be conditional, because we must leave the source
2474      * registers unchanged on success, and zero-extend the writeback
2475      * on failure (Z=0).
2476      */
2477     if (TARGET_LONG_BITS == 32) {
2478         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
2479     } else {
2480         TCGv zero = tcg_constant_tl(0);
2481 
2482         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
2483         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
2484                            s->T0, cpu_regs[R_EAX]);
2485         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
2486                            s->T1, cpu_regs[R_EDX]);
2487     }
2488 
2489     /* Update Z. */
2490     gen_compute_eflags(s);
2491     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
2492 }
2493 
2494 #ifdef TARGET_X86_64
2495 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
2496 {
2497     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
2498     TCGv_i64 t0, t1;
2499     TCGv_i128 cmp, val;
2500 
2501     gen_lea_modrm(env, s, modrm);
2502 
2503     cmp = tcg_temp_new_i128();
2504     val = tcg_temp_new_i128();
2505     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2506     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2507 
2508     /* Only require atomic with LOCK; non-parallel handled in generator. */
2509     if (s->prefix & PREFIX_LOCK) {
2510         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
2511     } else {
2512         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
2513     }
2514 
2515     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
2516 
2517     /* Determine success after the fact. */
2518     t0 = tcg_temp_new_i64();
2519     t1 = tcg_temp_new_i64();
2520     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
2521     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
2522     tcg_gen_or_i64(t0, t0, t1);
2523 
2524     /* Update Z. */
2525     gen_compute_eflags(s);
2526     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
2527     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
2528 
2529     /*
2530      * Extract the result values for the register pair.  We may do this
2531      * unconditionally, because on success (Z=1), the old value matches
2532      * the previous value in RDX:RAX.
2533      */
2534     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
2535     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
2536 }
2537 #endif
2538 
2539 static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
2540 {
2541     CPUX86State *env = cpu_env(cpu);
2542     bool update_fip = true;
2543     int modrm, mod, rm, op;
2544 
2545     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2546         /* if CR0.EM or CR0.TS are set, generate an FPU exception */
2547         /* XXX: what to do if illegal op ? */
2548         gen_exception(s, EXCP07_PREX);
2549         return true;
2550     }
2551     modrm = x86_ldub_code(env, s);
2552     mod = (modrm >> 6) & 3;
2553     rm = modrm & 7;
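    /* Build a 6-bit index from the low 3 bits of the D8..DF opcode
       (high part) and modrm.reg (low part); the switches below use it. */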
2554     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
2555     if (mod != 3) {
2556         /* memory op */
2557         AddressParts a = gen_lea_modrm_0(env, s, modrm);
2558         TCGv ea = gen_lea_modrm_1(s, a, false);
2559         TCGv last_addr = tcg_temp_new();
2560         bool update_fdp = true;
2561 
2562         tcg_gen_mov_tl(last_addr, ea);
2563         gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2564 
2565         switch (op) {
2566         case 0x00 ... 0x07: /* fxxxs */
2567         case 0x10 ... 0x17: /* fixxxl */
2568         case 0x20 ... 0x27: /* fxxxl */
2569         case 0x30 ... 0x37: /* fixxx */
2570             {
2571                 int op1;
2572                 op1 = op & 7;
2573 
2574                 switch (op >> 4) {
2575                 case 0:
2576                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2577                                         s->mem_index, MO_LEUL);
2578                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2579                     break;
2580                 case 1:
2581                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2582                                         s->mem_index, MO_LEUL);
2583                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2584                     break;
2585                 case 2:
2586                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2587                                         s->mem_index, MO_LEUQ);
2588                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2589                     break;
2590                 case 3:
2591                 default:
2592                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2593                                         s->mem_index, MO_LESW);
2594                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2595                     break;
2596                 }
2597 
2598                 gen_helper_fp_arith_ST0_FT0(op1);
2599                 if (op1 == 3) {
2600                     /* fcomp needs pop */
2601                     gen_helper_fpop(tcg_env);
2602                 }
2603             }
2604             break;
2605         case 0x08: /* flds */
2606         case 0x0a: /* fsts */
2607         case 0x0b: /* fstps */
2608         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2609         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2610         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2611             switch (op & 7) {
2612             case 0:
2613                 switch (op >> 4) {
2614                 case 0:
2615                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2616                                         s->mem_index, MO_LEUL);
2617                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2618                     break;
2619                 case 1:
2620                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2621                                         s->mem_index, MO_LEUL);
2622                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2623                     break;
2624                 case 2:
2625                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2626                                         s->mem_index, MO_LEUQ);
2627                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2628                     break;
2629                 case 3:
2630                 default:
2631                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2632                                         s->mem_index, MO_LESW);
2633                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2634                     break;
2635                 }
2636                 break;
2637             case 1:
2638                 /* XXX: the corresponding CPUID bit must be tested ! */
2639                 switch (op >> 4) {
2640                 case 1:
2641                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2642                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2643                                         s->mem_index, MO_LEUL);
2644                     break;
2645                 case 2:
2646                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2647                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2648                                         s->mem_index, MO_LEUQ);
2649                     break;
2650                 case 3:
2651                 default:
2652                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2653                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2654                                         s->mem_index, MO_LEUW);
2655                     break;
2656                 }
2657                 gen_helper_fpop(tcg_env);
2658                 break;
2659             default:
2660                 switch (op >> 4) {
2661                 case 0:
2662                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2663                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2664                                         s->mem_index, MO_LEUL);
2665                     break;
2666                 case 1:
2667                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2668                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2669                                         s->mem_index, MO_LEUL);
2670                     break;
2671                 case 2:
2672                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2673                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2674                                         s->mem_index, MO_LEUQ);
2675                     break;
2676                 case 3:
2677                 default:
2678                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2679                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2680                                         s->mem_index, MO_LEUW);
2681                     break;
2682                 }
2683                 if ((op & 7) == 3) {
2684                     gen_helper_fpop(tcg_env);
2685                 }
2686                 break;
2687             }
2688             break;
2689         case 0x0c: /* fldenv mem */
2690             gen_helper_fldenv(tcg_env, s->A0,
2691                               tcg_constant_i32(s->dflag - 1));
2692             update_fip = update_fdp = false;
2693             break;
2694         case 0x0d: /* fldcw mem */
2695             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2696                                 s->mem_index, MO_LEUW);
2697             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2698             update_fip = update_fdp = false;
2699             break;
2700         case 0x0e: /* fnstenv mem */
2701             gen_helper_fstenv(tcg_env, s->A0,
2702                               tcg_constant_i32(s->dflag - 1));
2703             update_fip = update_fdp = false;
2704             break;
2705         case 0x0f: /* fnstcw mem */
2706             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2707             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2708                                 s->mem_index, MO_LEUW);
2709             update_fip = update_fdp = false;
2710             break;
2711         case 0x1d: /* fldt mem */
2712             gen_helper_fldt_ST0(tcg_env, s->A0);
2713             break;
2714         case 0x1f: /* fstpt mem */
2715             gen_helper_fstt_ST0(tcg_env, s->A0);
2716             gen_helper_fpop(tcg_env);
2717             break;
2718         case 0x2c: /* frstor mem */
2719             gen_helper_frstor(tcg_env, s->A0,
2720                               tcg_constant_i32(s->dflag - 1));
2721             update_fip = update_fdp = false;
2722             break;
2723         case 0x2e: /* fnsave mem */
2724             gen_helper_fsave(tcg_env, s->A0,
2725                              tcg_constant_i32(s->dflag - 1));
2726             update_fip = update_fdp = false;
2727             break;
2728         case 0x2f: /* fnstsw mem */
2729             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2730             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2731                                 s->mem_index, MO_LEUW);
2732             update_fip = update_fdp = false;
2733             break;
2734         case 0x3c: /* fbld */
2735             gen_helper_fbld_ST0(tcg_env, s->A0);
2736             break;
2737         case 0x3e: /* fbstp */
2738             gen_helper_fbst_ST0(tcg_env, s->A0);
2739             gen_helper_fpop(tcg_env);
2740             break;
2741         case 0x3d: /* fildll */
2742             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2743                                 s->mem_index, MO_LEUQ);
2744             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2745             break;
2746         case 0x3f: /* fistpll */
2747             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2748             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2749                                 s->mem_index, MO_LEUQ);
2750             gen_helper_fpop(tcg_env);
2751             break;
2752         default:
2753             return false;
2754         }
2755 
2756         if (update_fdp) {
2757             int last_seg = s->override >= 0 ? s->override : a.def_seg;
2758 
2759             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2760                            offsetof(CPUX86State,
2761                                     segs[last_seg].selector));
2762             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2763                              offsetof(CPUX86State, fpds));
2764             tcg_gen_st_tl(last_addr, tcg_env,
2765                           offsetof(CPUX86State, fpdp));
2766         }
2767     } else {
2768         /* register float ops */
2769         int opreg = rm;
2770 
2771         switch (op) {
2772         case 0x08: /* fld sti */
2773             gen_helper_fpush(tcg_env);
2774             gen_helper_fmov_ST0_STN(tcg_env,
2775                                     tcg_constant_i32((opreg + 1) & 7));
2776             break;
2777         case 0x09: /* fxchg sti */
2778         case 0x29: /* fxchg4 sti, undocumented op */
2779         case 0x39: /* fxchg7 sti, undocumented op */
2780             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2781             break;
2782         case 0x0a: /* grp d9/2 */
2783             switch (rm) {
2784             case 0: /* fnop */
2785                 /*
2786                  * check exceptions (FreeBSD FPU probe)
2787                  * needs to be treated as I/O because of ferr_irq
2788                  */
2789                 translator_io_start(&s->base);
2790                 gen_helper_fwait(tcg_env);
2791                 update_fip = false;
2792                 break;
2793             default:
2794                 return false;
2795             }
2796             break;
2797         case 0x0c: /* grp d9/4 */
2798             switch (rm) {
2799             case 0: /* fchs */
2800                 gen_helper_fchs_ST0(tcg_env);
2801                 break;
2802             case 1: /* fabs */
2803                 gen_helper_fabs_ST0(tcg_env);
2804                 break;
2805             case 4: /* ftst */
2806                 gen_helper_fldz_FT0(tcg_env);
2807                 gen_helper_fcom_ST0_FT0(tcg_env);
2808                 break;
2809             case 5: /* fxam */
2810                 gen_helper_fxam_ST0(tcg_env);
2811                 break;
2812             default:
2813                 return false;
2814             }
2815             break;
2816         case 0x0d: /* grp d9/5 */
2817             {
2818                 switch (rm) {
2819                 case 0:
2820                     gen_helper_fpush(tcg_env);
2821                     gen_helper_fld1_ST0(tcg_env);
2822                     break;
2823                 case 1:
2824                     gen_helper_fpush(tcg_env);
2825                     gen_helper_fldl2t_ST0(tcg_env);
2826                     break;
2827                 case 2:
2828                     gen_helper_fpush(tcg_env);
2829                     gen_helper_fldl2e_ST0(tcg_env);
2830                     break;
2831                 case 3:
2832                     gen_helper_fpush(tcg_env);
2833                     gen_helper_fldpi_ST0(tcg_env);
2834                     break;
2835                 case 4:
2836                     gen_helper_fpush(tcg_env);
2837                     gen_helper_fldlg2_ST0(tcg_env);
2838                     break;
2839                 case 5:
2840                     gen_helper_fpush(tcg_env);
2841                     gen_helper_fldln2_ST0(tcg_env);
2842                     break;
2843                 case 6:
2844                     gen_helper_fpush(tcg_env);
2845                     gen_helper_fldz_ST0(tcg_env);
2846                     break;
2847                 default:
2848                     return false;
2849                 }
2850             }
2851             break;
2852         case 0x0e: /* grp d9/6 */
2853             switch (rm) {
2854             case 0: /* f2xm1 */
2855                 gen_helper_f2xm1(tcg_env);
2856                 break;
2857             case 1: /* fyl2x */
2858                 gen_helper_fyl2x(tcg_env);
2859                 break;
2860             case 2: /* fptan */
2861                 gen_helper_fptan(tcg_env);
2862                 break;
2863             case 3: /* fpatan */
2864                 gen_helper_fpatan(tcg_env);
2865                 break;
2866             case 4: /* fxtract */
2867                 gen_helper_fxtract(tcg_env);
2868                 break;
2869             case 5: /* fprem1 */
2870                 gen_helper_fprem1(tcg_env);
2871                 break;
2872             case 6: /* fdecstp */
2873                 gen_helper_fdecstp(tcg_env);
2874                 break;
2875             default:
2876             case 7: /* fincstp */
2877                 gen_helper_fincstp(tcg_env);
2878                 break;
2879             }
2880             break;
2881         case 0x0f: /* grp d9/7 */
2882             switch (rm) {
2883             case 0: /* fprem */
2884                 gen_helper_fprem(tcg_env);
2885                 break;
2886             case 1: /* fyl2xp1 */
2887                 gen_helper_fyl2xp1(tcg_env);
2888                 break;
2889             case 2: /* fsqrt */
2890                 gen_helper_fsqrt(tcg_env);
2891                 break;
2892             case 3: /* fsincos */
2893                 gen_helper_fsincos(tcg_env);
2894                 break;
2895             case 5: /* fscale */
2896                 gen_helper_fscale(tcg_env);
2897                 break;
2898             case 4: /* frndint */
2899                 gen_helper_frndint(tcg_env);
2900                 break;
2901             case 6: /* fsin */
2902                 gen_helper_fsin(tcg_env);
2903                 break;
2904             default:
2905             case 7: /* fcos */
2906                 gen_helper_fcos(tcg_env);
2907                 break;
2908             }
2909             break;
2910         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2911         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2912         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2913             {
2914                 int op1;
2915 
2916                 op1 = op & 7;
2917                 if (op >= 0x20) {
2918                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2919                     if (op >= 0x30) {
2920                         gen_helper_fpop(tcg_env);
2921                     }
2922                 } else {
2923                     gen_helper_fmov_FT0_STN(tcg_env,
2924                                             tcg_constant_i32(opreg));
2925                     gen_helper_fp_arith_ST0_FT0(op1);
2926                 }
2927             }
2928             break;
2929         case 0x02: /* fcom */
2930         case 0x22: /* fcom2, undocumented op */
2931             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2932             gen_helper_fcom_ST0_FT0(tcg_env);
2933             break;
2934         case 0x03: /* fcomp */
2935         case 0x23: /* fcomp3, undocumented op */
2936         case 0x32: /* fcomp5, undocumented op */
2937             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2938             gen_helper_fcom_ST0_FT0(tcg_env);
2939             gen_helper_fpop(tcg_env);
2940             break;
2941         case 0x15: /* da/5 */
2942             switch (rm) {
2943             case 1: /* fucompp */
2944                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2945                 gen_helper_fucom_ST0_FT0(tcg_env);
2946                 gen_helper_fpop(tcg_env);
2947                 gen_helper_fpop(tcg_env);
2948                 break;
2949             default:
2950                 return false;
2951             }
2952             break;
2953         case 0x1c:
2954             switch (rm) {
2955             case 0: /* feni (287 only, just do nop here) */
2956                 break;
2957             case 1: /* fdisi (287 only, just do nop here) */
2958                 break;
2959             case 2: /* fclex */
2960                 gen_helper_fclex(tcg_env);
2961                 update_fip = false;
2962                 break;
2963             case 3: /* fninit */
2964                 gen_helper_fninit(tcg_env);
2965                 update_fip = false;
2966                 break;
2967             case 4: /* fsetpm (287 only, just do nop here) */
2968                 break;
2969             default:
2970                 return false;
2971             }
2972             break;
2973         case 0x1d: /* fucomi */
2974             if (!(s->cpuid_features & CPUID_CMOV)) {
2975                 goto illegal_op;
2976             }
2977             gen_update_cc_op(s);
2978             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2979             gen_helper_fucomi_ST0_FT0(tcg_env);
2980             set_cc_op(s, CC_OP_EFLAGS);
2981             break;
2982         case 0x1e: /* fcomi */
2983             if (!(s->cpuid_features & CPUID_CMOV)) {
2984                 goto illegal_op;
2985             }
2986             gen_update_cc_op(s);
2987             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2988             gen_helper_fcomi_ST0_FT0(tcg_env);
2989             set_cc_op(s, CC_OP_EFLAGS);
2990             break;
2991         case 0x28: /* ffree sti */
2992             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2993             break;
2994         case 0x2a: /* fst sti */
2995             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2996             break;
2997         case 0x2b: /* fstp sti */
2998         case 0x0b: /* fstp1 sti, undocumented op */
2999         case 0x3a: /* fstp8 sti, undocumented op */
3000         case 0x3b: /* fstp9 sti, undocumented op */
3001             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
3002             gen_helper_fpop(tcg_env);
3003             break;
3004         case 0x2c: /* fucom st(i) */
3005             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
3006             gen_helper_fucom_ST0_FT0(tcg_env);
3007             break;
3008         case 0x2d: /* fucomp st(i) */
3009             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
3010             gen_helper_fucom_ST0_FT0(tcg_env);
3011             gen_helper_fpop(tcg_env);
3012             break;
3013         case 0x33: /* de/3 */
3014             switch (rm) {
3015             case 1: /* fcompp */
3016                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
3017                 gen_helper_fcom_ST0_FT0(tcg_env);
3018                 gen_helper_fpop(tcg_env);
3019                 gen_helper_fpop(tcg_env);
3020                 break;
3021             default:
3022                 return false;
3023             }
3024             break;
3025         case 0x38: /* ffreep sti, undocumented op */
3026             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
3027             gen_helper_fpop(tcg_env);
3028             break;
3029         case 0x3c: /* df/4 */
3030             switch (rm) {
3031             case 0:
3032                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
3033                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
3034                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3035                 break;
3036             default:
3037                 return false;
3038             }
3039             break;
3040         case 0x3d: /* fucomip */
3041             if (!(s->cpuid_features & CPUID_CMOV)) {
3042                 goto illegal_op;
3043             }
3044             gen_update_cc_op(s);
3045             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
3046             gen_helper_fucomi_ST0_FT0(tcg_env);
3047             gen_helper_fpop(tcg_env);
3048             set_cc_op(s, CC_OP_EFLAGS);
3049             break;
3050         case 0x3e: /* fcomip */
3051             if (!(s->cpuid_features & CPUID_CMOV)) {
3052                 goto illegal_op;
3053             }
3054             gen_update_cc_op(s);
3055             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
3056             gen_helper_fcomi_ST0_FT0(tcg_env);
3057             gen_helper_fpop(tcg_env);
3058             set_cc_op(s, CC_OP_EFLAGS);
3059             break;
3060         case 0x10 ... 0x13: /* fcmovxx */
3061         case 0x18 ... 0x1b:
3062             {
3063                 int op1;
3064                 TCGLabel *l1;
3065                 static const uint8_t fcmov_cc[8] = {
3066                     (JCC_B << 1),
3067                     (JCC_Z << 1),
3068                     (JCC_BE << 1),
3069                     (JCC_P << 1),
3070                 };
3071 
3072                 if (!(s->cpuid_features & CPUID_CMOV)) {
3073                     goto illegal_op;
3074                 }
3075                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
3076                 l1 = gen_new_label();
3077                 gen_jcc1_noeob(s, op1, l1);
3078                 gen_helper_fmov_ST0_STN(tcg_env,
3079                                         tcg_constant_i32(opreg));
3080                 gen_set_label(l1);
3081             }
3082             break;
3083         default:
3084             return false;
3085         }
3086     }
3087 
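         /*
          * Non-control x87 instructions record the CS selector and EIP of
          * the most recent instruction; keep fpcs/fpip up to date so that
          * fnstenv/fsave/fxsave report the correct FPU instruction pointer.
          */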
3088     if (update_fip) {
3089         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
3090                        offsetof(CPUX86State, segs[R_CS].selector));
3091         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
3092                          offsetof(CPUX86State, fpcs));
3093         tcg_gen_st_tl(eip_cur_tl(s),
3094                       tcg_env, offsetof(CPUX86State, fpip));
3095     }
3096     return true;
3097 
3098  illegal_op:
3099     gen_illegal_opcode(s);
3100     return true;
3101 }
3102 
3103 static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
3104 {
3105     CPUX86State *env = cpu_env(cpu);
3106     int prefixes = s->prefix;
3107     MemOp dflag = s->dflag;
3108     int shift;
3109     MemOp ot;
3110     int modrm, reg, rm, mod, op, opreg, val;
3111 
3112     /* now check op code */
3113     switch (b) {
3114         /**************************/
3115         /* arith & logic */
3116     case 0x1c0:
3117     case 0x1c1: /* xadd Ev, Gv */
3118         ot = mo_b_d(b, dflag);
3119         modrm = x86_ldub_code(env, s);
3120         reg = ((modrm >> 3) & 7) | REX_R(s);
3121         mod = (modrm >> 6) & 3;
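             /* XADD: temp = dest; dest = temp + src; src = temp.  T0 carries
                src (Gv) in, and T1 receives the old dest value.  */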
3122         gen_op_mov_v_reg(s, ot, s->T0, reg);
3123         if (mod == 3) {
3124             rm = (modrm & 7) | REX_B(s);
3125             gen_op_mov_v_reg(s, ot, s->T1, rm);
3126             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3127             gen_op_mov_reg_v(s, ot, reg, s->T1);
3128             gen_op_mov_reg_v(s, ot, rm, s->T0);
3129         } else {
3130             gen_lea_modrm(env, s, modrm);
3131             if (s->prefix & PREFIX_LOCK) {
3132                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3133                                             s->mem_index, ot | MO_LE);
3134                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3135             } else {
3136                 gen_op_ld_v(s, ot, s->T1, s->A0);
3137                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3138                 gen_op_st_v(s, ot, s->T0, s->A0);
3139             }
3140             gen_op_mov_reg_v(s, ot, reg, s->T1);
3141         }
3142         gen_op_update2_cc(s);
3143         set_cc_op(s, CC_OP_ADDB + ot);
3144         break;
3145     case 0x1b0:
3146     case 0x1b1: /* cmpxchg Ev, Gv */
3147         {
3148             TCGv oldv, newv, cmpv, dest;
3149 
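                 /*
                  * Sketch of the architectural behaviour being modelled:
                  *     temp = dest;
                  *     if (accum == temp) dest = src; else accum = temp;
                  * with EFLAGS set as for "cmp accum, dest".  The memory form
                  * always issues a write cycle, and the register form must
                  * not touch rm on failure; see the comments below.
                  */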
3150             ot = mo_b_d(b, dflag);
3151             modrm = x86_ldub_code(env, s);
3152             reg = ((modrm >> 3) & 7) | REX_R(s);
3153             mod = (modrm >> 6) & 3;
3154             oldv = tcg_temp_new();
3155             newv = tcg_temp_new();
3156             cmpv = tcg_temp_new();
3157             gen_op_mov_v_reg(s, ot, newv, reg);
3158             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3159             gen_extu(ot, cmpv);
3160             if (s->prefix & PREFIX_LOCK) {
3161                 if (mod == 3) {
3162                     goto illegal_op;
3163                 }
3164                 gen_lea_modrm(env, s, modrm);
3165                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3166                                           s->mem_index, ot | MO_LE);
3167             } else {
3168                 if (mod == 3) {
3169                     rm = (modrm & 7) | REX_B(s);
3170                     gen_op_mov_v_reg(s, ot, oldv, rm);
3171                     gen_extu(ot, oldv);
3172 
3173                     /*
3174                      * Unlike the memory case, where "the destination operand receives
3175                      * a write cycle without regard to the result of the comparison",
3176                      * rm must not be touched at all if the write fails, including
3177                      * not being zero-extended on 64-bit processors.  So, precompute
3178                      * the result of a successful writeback and perform the movcond
3179                      * directly on cpu_regs.  The accumulator must also be written
3180                      * first, in case rm is part of RAX too.
3181                      */
3182                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3183                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3184                 } else {
3185                     gen_lea_modrm(env, s, modrm);
3186                     gen_op_ld_v(s, ot, oldv, s->A0);
3187 
3188                     /*
3189                      * Perform an unconditional store cycle like a physical CPU;
3190                      * it must happen before changing the accumulator, to ensure
3191                      * idempotency if the store faults and the instruction
3192                      * is restarted.
3193                      */
3194                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3195                     gen_op_st_v(s, ot, newv, s->A0);
3196                 }
3197             }
3198             /*
3199              * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3200              * since it's dead here.
3201              */
3202             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3203             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
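                 /* The flags are those of the implicit CMP accum, dest:
                    CC_DST = cmpv - oldv, with the operands in CC_SRC/cc_srcT.  */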
3204             tcg_gen_mov_tl(cpu_cc_src, oldv);
3205             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3206             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3207             set_cc_op(s, CC_OP_SUBB + ot);
3208         }
3209         break;
3210     case 0x1c7: /* cmpxchg8b */
3211         modrm = x86_ldub_code(env, s);
3212         mod = (modrm >> 6) & 3;
3213         switch ((modrm >> 3) & 7) {
3214         case 1: /* CMPXCHG8, CMPXCHG16 */
3215             if (mod == 3) {
3216                 goto illegal_op;
3217             }
3218 #ifdef TARGET_X86_64
3219             if (dflag == MO_64) {
3220                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3221                     goto illegal_op;
3222                 }
3223                 gen_cmpxchg16b(s, env, modrm);
3224                 break;
3225             }
3226 #endif
3227             if (!(s->cpuid_features & CPUID_CX8)) {
3228                 goto illegal_op;
3229             }
3230             gen_cmpxchg8b(s, env, modrm);
3231             break;
3232 
3233         case 7: /* RDSEED, RDPID with f3 prefix */
3234             if (mod != 3 ||
3235                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3236                 goto illegal_op;
3237             }
3238             if (s->prefix & PREFIX_REPZ) {
3239                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3240                     goto illegal_op;
3241                 }
3242                 gen_helper_rdpid(s->T0, tcg_env);
3243                 rm = (modrm & 7) | REX_B(s);
3244                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3245                 break;
3246             } else {
3247                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3248                     goto illegal_op;
3249                 }
3250                 goto do_rdrand;
3251             }
3252 
3253         case 6: /* RDRAND */
3254             if (mod != 3 ||
3255                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3256                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3257                 goto illegal_op;
3258             }
3259         do_rdrand:
3260             translator_io_start(&s->base);
3261             gen_helper_rdrand(s->T0, tcg_env);
3262             rm = (modrm & 7) | REX_B(s);
3263             gen_op_mov_reg_v(s, dflag, rm, s->T0);
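                 /* The helper reports success in CF and, per the ISA, clears
                    the other arithmetic flags; hence CC_OP_EFLAGS.  */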
3264             set_cc_op(s, CC_OP_EFLAGS);
3265             break;
3266 
3267         default:
3268             goto illegal_op;
3269         }
3270         break;
3271 
3272         /**************************/
3273         /* shifts */
3274     case 0x1a4: /* shld imm */
3275         op = 0;
3276         shift = 1;
3277         goto do_shiftd;
3278     case 0x1a5: /* shld cl */
3279         op = 0;
3280         shift = 0;
3281         goto do_shiftd;
3282     case 0x1ac: /* shrd imm */
3283         op = 1;
3284         shift = 1;
3285         goto do_shiftd;
3286     case 0x1ad: /* shrd cl */
3287         op = 1;
3288         shift = 0;
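             /*
              * SHLD/SHRD (op 0/1) shift the destination while filling the
              * vacated bits from the Gv register (loaded into T1 below); the
              * count comes from an immediate byte (shift == 1) or from CL
              * (shift == 0).
              */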
3289     do_shiftd:
3290         ot = dflag;
3291         modrm = x86_ldub_code(env, s);
3292         mod = (modrm >> 6) & 3;
3293         rm = (modrm & 7) | REX_B(s);
3294         reg = ((modrm >> 3) & 7) | REX_R(s);
3295         if (mod != 3) {
3296             gen_lea_modrm(env, s, modrm);
3297             opreg = OR_TMP0;
3298         } else {
3299             opreg = rm;
3300         }
3301         gen_op_mov_v_reg(s, ot, s->T1, reg);
3302 
3303         if (shift) {
3304             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
3305             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
3306         } else {
3307             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
3308         }
3309         break;
3310 
3311         /************************/
3312         /* bit operations */
3313     case 0x1ba: /* bt/bts/btr/btc Gv, im */
3314         ot = dflag;
3315         modrm = x86_ldub_code(env, s);
3316         op = (modrm >> 3) & 7;
3317         mod = (modrm >> 6) & 3;
3318         rm = (modrm & 7) | REX_B(s);
3319         if (mod != 3) {
3320             s->rip_offset = 1;
3321             gen_lea_modrm(env, s, modrm);
3322             if (!(s->prefix & PREFIX_LOCK)) {
3323                 gen_op_ld_v(s, ot, s->T0, s->A0);
3324             }
3325         } else {
3326             gen_op_mov_v_reg(s, ot, s->T0, rm);
3327         }
3328         /* load shift */
3329         val = x86_ldub_code(env, s);
3330         tcg_gen_movi_tl(s->T1, val);
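             /* In group 8, /4../7 encode bt/bts/btr/btc; /0../3 are undefined. */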
3331         if (op < 4)
3332             goto unknown_op;
3333         op -= 4;
3334         goto bt_op;
3335     case 0x1a3: /* bt Gv, Ev */
3336         op = 0;
3337         goto do_btx;
3338     case 0x1ab: /* bts */
3339         op = 1;
3340         goto do_btx;
3341     case 0x1b3: /* btr */
3342         op = 2;
3343         goto do_btx;
3344     case 0x1bb: /* btc */
3345         op = 3;
3346     do_btx:
3347         ot = dflag;
3348         modrm = x86_ldub_code(env, s);
3349         reg = ((modrm >> 3) & 7) | REX_R(s);
3350         mod = (modrm >> 6) & 3;
3351         rm = (modrm & 7) | REX_B(s);
3352         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
3353         if (mod != 3) {
3354             AddressParts a = gen_lea_modrm_0(env, s, modrm);
3355             /* specific case: the bit offset may address memory outside the operand, so add a displacement */
3356             gen_exts(ot, s->T1);
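                 /* tmp0 = ((bit offset >> (3 + ot)) << ot): the signed byte
                    displacement of the 2^ot-byte word containing the bit.  */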
3357             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
3358             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
3359             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
3360             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
3361             if (!(s->prefix & PREFIX_LOCK)) {
3362                 gen_op_ld_v(s, ot, s->T0, s->A0);
3363             }
3364         } else {
3365             gen_op_mov_v_reg(s, ot, s->T0, rm);
3366         }
3367     bt_op:
3368         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
3369         tcg_gen_movi_tl(s->tmp0, 1);
3370         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
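             /* T1 is now the bit index modulo the operand width, and
                tmp0 = 1 << T1 is the corresponding mask used by bts/btr/btc;
                the tested bit itself is extracted into tmp4 below.  */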
3371         if (s->prefix & PREFIX_LOCK) {
3372             switch (op) {
3373             case 0: /* bt */
3374                 /* Needs no atomic ops; we suppressed the normal
3375                    memory load for LOCK above, so do it now.  */
3376                 gen_op_ld_v(s, ot, s->T0, s->A0);
3377                 break;
3378             case 1: /* bts */
3379                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
3380                                            s->mem_index, ot | MO_LE);
3381                 break;
3382             case 2: /* btr */
3383                 tcg_gen_not_tl(s->tmp0, s->tmp0);
3384                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
3385                                             s->mem_index, ot | MO_LE);
3386                 break;
3387             default:
3388             case 3: /* btc */
3389                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
3390                                             s->mem_index, ot | MO_LE);
3391                 break;
3392             }
3393             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3394         } else {
3395             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3396             switch (op) {
3397             case 0: /* bt */
3398                 /* Data already loaded; nothing to do.  */
3399                 break;
3400             case 1: /* bts */
3401                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
3402                 break;
3403             case 2: /* btr */
3404                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
3405                 break;
3406             default:
3407             case 3: /* btc */
3408                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
3409                 break;
3410             }
3411             if (op != 0) {
3412                 if (mod != 3) {
3413                     gen_op_st_v(s, ot, s->T0, s->A0);
3414                 } else {
3415                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3416                 }
3417             }
3418         }
3419 
3420         /* Delay all CC updates until after the store above.  Note that
3421            C is the result of the test, Z is unchanged, and the others
3422            are all undefined.  */
3423         switch (s->cc_op) {
3424         case CC_OP_MULB ... CC_OP_MULQ:
3425         case CC_OP_ADDB ... CC_OP_ADDQ:
3426         case CC_OP_ADCB ... CC_OP_ADCQ:
3427         case CC_OP_SUBB ... CC_OP_SUBQ:
3428         case CC_OP_SBBB ... CC_OP_SBBQ:
3429         case CC_OP_LOGICB ... CC_OP_LOGICQ:
3430         case CC_OP_INCB ... CC_OP_INCQ:
3431         case CC_OP_DECB ... CC_OP_DECQ:
3432         case CC_OP_SHLB ... CC_OP_SHLQ:
3433         case CC_OP_SARB ... CC_OP_SARQ:
3434         case CC_OP_BMILGB ... CC_OP_BMILGQ:
3435             /* Z was going to be computed from the non-zero status of CC_DST.
3436                We can get that same Z value (and the new C value) by leaving
3437                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
3438                same width.  */
3439             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
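                 /* (cc_op - CC_OP_MULB) & 3 recovers the operand width
                    (B/W/L/Q) of the live cc_op; with CC_OP_SARB of that width,
                    C comes from bit 0 of CC_SRC while Z is still computed
                    from CC_DST.  */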
3440             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
3441             break;
3442         default:
3443             /* Otherwise, generate EFLAGS and replace the C bit.  */
3444             gen_compute_eflags(s);
3445             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
3446                                ctz32(CC_C), 1);
3447             break;
3448         }
3449         break;
3450     case 0x1bc: /* bsf / tzcnt */
3451     case 0x1bd: /* bsr / lzcnt */
3452         ot = dflag;
3453         modrm = x86_ldub_code(env, s);
3454         reg = ((modrm >> 3) & 7) | REX_R(s);
3455         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3456         gen_extu(ot, s->T0);
3457 
3458         /* Note that lzcnt and tzcnt are in different extensions.  */
3459         if ((prefixes & PREFIX_REPZ)
3460             && (b & 1
3461                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
3462                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
3463             int size = 8 << ot;
3464             /* For lzcnt/tzcnt, the C bit is defined in terms of the input. */
3465             tcg_gen_mov_tl(cpu_cc_src, s->T0);
3466             if (b & 1) {
3467                 /* For lzcnt, reduce the target_ulong result by the
3468                    number of zeros that we expect to find at the top.  */
3469                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
3470                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
3471             } else {
3472                 /* For tzcnt, a zero input must return the operand size.  */
3473                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
3474             }
3475             /* For lzcnt/tzcnt, the Z bit is defined in terms of the result.  */
3476             gen_op_update1_cc(s);
3477             set_cc_op(s, CC_OP_BMILGB + ot);
3478         } else {
3479             /* For bsr/bsf, only the Z bit is defined, and it reflects
3480                the input rather than the result.  */
3481             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3482             set_cc_op(s, CC_OP_LOGICB + ot);
3483 
3484             /* ??? The manual says that the output is undefined when the
3485                input is zero, but real hardware leaves it unchanged, and
3486                real programs appear to depend on that.  Accomplish this
3487                by passing the output as the value to return upon zero.  */
3488             if (b & 1) {
3489                 /* For bsr, return the bit index of the first 1 bit,
3490                    not the count of leading zeros.  */
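                     /* idx = (TARGET_LONG_BITS - 1) - clz(T0); the fallback
                        value (the unmodified dest register) is pre-XORed into
                        T1 so that the final XOR restores it when T0 == 0.  */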
3491                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
3492                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
3493                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
3494             } else {
3495                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
3496             }
3497         }
3498         gen_op_mov_reg_v(s, ot, reg, s->T0);
3499         break;
3500     case 0x130: /* wrmsr */
3501     case 0x132: /* rdmsr */
3502         if (check_cpl0(s)) {
3503             gen_update_cc_op(s);
3504             gen_update_eip_cur(s);
3505             if (b & 2) {
3506                 gen_helper_rdmsr(tcg_env);
3507             } else {
3508                 gen_helper_wrmsr(tcg_env);
3509                 s->base.is_jmp = DISAS_EOB_NEXT;
3510             }
3511         }
3512         break;
3513     case 0x131: /* rdtsc */
3514         gen_update_cc_op(s);
3515         gen_update_eip_cur(s);
3516         translator_io_start(&s->base);
3517         gen_helper_rdtsc(tcg_env);
3518         break;
3519     case 0x133: /* rdpmc */
3520         gen_update_cc_op(s);
3521         gen_update_eip_cur(s);
3522         gen_helper_rdpmc(tcg_env);
3523         s->base.is_jmp = DISAS_NORETURN;
3524         break;
3525     case 0x134: /* sysenter */
3526         /* For AMD, SYSENTER is not valid in long mode */
3527         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
3528             goto illegal_op;
3529         }
3530         if (!PE(s)) {
3531             gen_exception_gpf(s);
3532         } else {
3533             gen_helper_sysenter(tcg_env);
3534             s->base.is_jmp = DISAS_EOB_ONLY;
3535         }
3536         break;
3537     case 0x135: /* sysexit */
3538         /* For AMD, SYSEXIT is not valid in long mode */
3539         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
3540             goto illegal_op;
3541         }
3542         if (!PE(s) || CPL(s) != 0) {
3543             gen_exception_gpf(s);
3544         } else {
3545             gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
3546             s->base.is_jmp = DISAS_EOB_ONLY;
3547         }
3548         break;
3549     case 0x105: /* syscall */
3550         /* For Intel, SYSCALL is only valid in long mode */
3551         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
3552             goto illegal_op;
3553         }
3554         gen_update_cc_op(s);
3555         gen_update_eip_cur(s);
3556         gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
3557         /* TF handling for the syscall insn is different.  The TF bit is checked
3558            after the syscall insn completes.  This allows #DB not to be
3559            generated after one has entered CPL0 if TF is set in FMASK.  */
3560         gen_eob_syscall(s);
3561         break;
3562     case 0x107: /* sysret */
3563         /* For Intel, SYSRET is only valid in long mode */
3564         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
3565             goto illegal_op;
3566         }
3567         if (!PE(s) || CPL(s) != 0) {
3568             gen_exception_gpf(s);
3569         } else {
3570             gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
3571             /* condition codes are modified only in long mode */
3572             if (LMA(s)) {
3573                 set_cc_op(s, CC_OP_EFLAGS);
3574             }
3575             /* TF handling for the sysret insn is different.  The TF bit is
3576                checked after the sysret insn completes.  This allows #DB to be
3577                generated "as if" the syscall insn in userspace had just
3578                completed.  */
3579             gen_eob_syscall(s);
3580         }
3581         break;
3582     case 0x1a2: /* cpuid */
3583         gen_update_cc_op(s);
3584         gen_update_eip_cur(s);
3585         gen_helper_cpuid(tcg_env);
3586         break;
3587     case 0x100:
3588         modrm = x86_ldub_code(env, s);
3589         mod = (modrm >> 6) & 3;
3590         op = (modrm >> 3) & 7;
3591         switch (op) {
3592         case 0: /* sldt */
3593             if (!PE(s) || VM86(s))
3594                 goto illegal_op;
3595             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3596                 break;
3597             }
3598             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
3599             tcg_gen_ld32u_tl(s->T0, tcg_env,
3600                              offsetof(CPUX86State, ldt.selector));
3601             ot = mod == 3 ? dflag : MO_16;
3602             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
3603             break;
3604         case 2: /* lldt */
3605             if (!PE(s) || VM86(s))
3606                 goto illegal_op;
3607             if (check_cpl0(s)) {
3608                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
3609                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3610                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3611                 gen_helper_lldt(tcg_env, s->tmp2_i32);
3612             }
3613             break;
3614         case 1: /* str */
3615             if (!PE(s) || VM86(s))
3616                 goto illegal_op;
3617             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3618                 break;
3619             }
3620             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
3621             tcg_gen_ld32u_tl(s->T0, tcg_env,
3622                              offsetof(CPUX86State, tr.selector));
3623             ot = mod == 3 ? dflag : MO_16;
3624             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
3625             break;
3626         case 3: /* ltr */
3627             if (!PE(s) || VM86(s))
3628                 goto illegal_op;
3629             if (check_cpl0(s)) {
3630                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3631                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3632                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3633                 gen_helper_ltr(tcg_env, s->tmp2_i32);
3634             }
3635             break;
3636         case 4: /* verr */
3637         case 5: /* verw */
3638             if (!PE(s) || VM86(s))
3639                 goto illegal_op;
3640             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3641             gen_update_cc_op(s);
3642             if (op == 4) {
3643                 gen_helper_verr(tcg_env, s->T0);
3644             } else {
3645                 gen_helper_verw(tcg_env, s->T0);
3646             }
3647             set_cc_op(s, CC_OP_EFLAGS);
3648             break;
3649         default:
3650             goto unknown_op;
3651         }
3652         break;
3653 
3654     case 0x101:
3655         modrm = x86_ldub_code(env, s);
3656         switch (modrm) {
3657         CASE_MODRM_MEM_OP(0): /* sgdt */
3658             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3659                 break;
3660             }
3661             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3662             gen_lea_modrm(env, s, modrm);
3663             tcg_gen_ld32u_tl(s->T0,
3664                              tcg_env, offsetof(CPUX86State, gdt.limit));
3665             gen_op_st_v(s, MO_16, s->T0, s->A0);
3666             gen_add_A0_im(s, 2);
3667             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3668             /*
3669              * NB: Despite a confusing description in Intel CPU documentation,
3670              *     all 32 bits are written regardless of operand size.
3671              */
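                 /* CODE64(s) + MO_32 selects MO_64 (= MO_32 + 1) in 64-bit
                    mode and MO_32 otherwise.  */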
3672             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3673             break;
3674 
3675         case 0xc8: /* monitor */
3676             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3677                 goto illegal_op;
3678             }
3679             gen_update_cc_op(s);
3680             gen_update_eip_cur(s);
3681             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3682             gen_add_A0_ds_seg(s);
3683             gen_helper_monitor(tcg_env, s->A0);
3684             break;
3685 
3686         case 0xc9: /* mwait */
3687             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3688                 goto illegal_op;
3689             }
3690             gen_update_cc_op(s);
3691             gen_update_eip_cur(s);
3692             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3693             s->base.is_jmp = DISAS_NORETURN;
3694             break;
3695 
3696         case 0xca: /* clac */
3697             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3698                 || CPL(s) != 0) {
3699                 goto illegal_op;
3700             }
3701             gen_reset_eflags(s, AC_MASK);
3702             s->base.is_jmp = DISAS_EOB_NEXT;
3703             break;
3704 
3705         case 0xcb: /* stac */
3706             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3707                 || CPL(s) != 0) {
3708                 goto illegal_op;
3709             }
3710             gen_set_eflags(s, AC_MASK);
3711             s->base.is_jmp = DISAS_EOB_NEXT;
3712             break;
3713 
3714         CASE_MODRM_MEM_OP(1): /* sidt */
3715             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3716                 break;
3717             }
3718             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3719             gen_lea_modrm(env, s, modrm);
3720             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3721             gen_op_st_v(s, MO_16, s->T0, s->A0);
3722             gen_add_A0_im(s, 2);
3723             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3724             /*
3725              * NB: Despite a confusing description in Intel CPU documentation,
3726              *     all 32 bits are written regardless of operand size.
3727              */
3728             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3729             break;
3730 
3731         case 0xd0: /* xgetbv */
3732             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3733                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3734                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
3735                 goto illegal_op;
3736             }
3737             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3738             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3739             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3740             break;
3741 
3742         case 0xd1: /* xsetbv */
3743             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3744                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3745                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
3746                 goto illegal_op;
3747             }
3748             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3749             if (!check_cpl0(s)) {
3750                 break;
3751             }
3752             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3753                                   cpu_regs[R_EDX]);
3754             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3755             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3756             /* End TB because translation flags may change.  */
3757             s->base.is_jmp = DISAS_EOB_NEXT;
3758             break;
3759 
3760         case 0xd8: /* VMRUN */
3761             if (!SVME(s) || !PE(s)) {
3762                 goto illegal_op;
3763             }
3764             if (!check_cpl0(s)) {
3765                 break;
3766             }
3767             gen_update_cc_op(s);
3768             gen_update_eip_cur(s);
3769             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3770                              cur_insn_len_i32(s));
3771             tcg_gen_exit_tb(NULL, 0);
3772             s->base.is_jmp = DISAS_NORETURN;
3773             break;
3774 
3775         case 0xd9: /* VMMCALL */
3776             if (!SVME(s)) {
3777                 goto illegal_op;
3778             }
3779             gen_update_cc_op(s);
3780             gen_update_eip_cur(s);
3781             gen_helper_vmmcall(tcg_env);
3782             break;
3783 
3784         case 0xda: /* VMLOAD */
3785             if (!SVME(s) || !PE(s)) {
3786                 goto illegal_op;
3787             }
3788             if (!check_cpl0(s)) {
3789                 break;
3790             }
3791             gen_update_cc_op(s);
3792             gen_update_eip_cur(s);
3793             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3794             break;
3795 
3796         case 0xdb: /* VMSAVE */
3797             if (!SVME(s) || !PE(s)) {
3798                 goto illegal_op;
3799             }
3800             if (!check_cpl0(s)) {
3801                 break;
3802             }
3803             gen_update_cc_op(s);
3804             gen_update_eip_cur(s);
3805             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3806             break;
3807 
3808         case 0xdc: /* STGI */
3809             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3810                 || !PE(s)) {
3811                 goto illegal_op;
3812             }
3813             if (!check_cpl0(s)) {
3814                 break;
3815             }
3816             gen_update_cc_op(s);
3817             gen_helper_stgi(tcg_env);
3818             s->base.is_jmp = DISAS_EOB_NEXT;
3819             break;
3820 
3821         case 0xdd: /* CLGI */
3822             if (!SVME(s) || !PE(s)) {
3823                 goto illegal_op;
3824             }
3825             if (!check_cpl0(s)) {
3826                 break;
3827             }
3828             gen_update_cc_op(s);
3829             gen_update_eip_cur(s);
3830             gen_helper_clgi(tcg_env);
3831             break;
3832 
3833         case 0xde: /* SKINIT */
3834             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3835                 || !PE(s)) {
3836                 goto illegal_op;
3837             }
3838             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3839             /* If not intercepted, not implemented -- raise #UD. */
3840             goto illegal_op;
3841 
3842         case 0xdf: /* INVLPGA */
3843             if (!SVME(s) || !PE(s)) {
3844                 goto illegal_op;
3845             }
3846             if (!check_cpl0(s)) {
3847                 break;
3848             }
3849             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
3850             if (s->aflag == MO_64) {
3851                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3852             } else {
3853                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3854             }
3855             gen_helper_flush_page(tcg_env, s->A0);
3856             s->base.is_jmp = DISAS_EOB_NEXT;
3857             break;
3858 
3859         CASE_MODRM_MEM_OP(2): /* lgdt */
3860             if (!check_cpl0(s)) {
3861                 break;
3862             }
3863             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3864             gen_lea_modrm(env, s, modrm);
3865             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3866             gen_add_A0_im(s, 2);
3867             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3868             if (dflag == MO_16) {
3869                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3870             }
3871             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3872             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3873             break;
3874 
3875         CASE_MODRM_MEM_OP(3): /* lidt */
3876             if (!check_cpl0(s)) {
3877                 break;
3878             }
3879             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3880             gen_lea_modrm(env, s, modrm);
3881             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3882             gen_add_A0_im(s, 2);
3883             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3884             if (dflag == MO_16) {
3885                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3886             }
3887             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3888             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3889             break;
3890 
3891         CASE_MODRM_OP(4): /* smsw */
3892             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3893                 break;
3894             }
3895             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3896             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3897             /*
3898              * In 32-bit mode, the upper 16 bits of the destination
3899              * register are undefined.  In practice, CR0[31:0] is stored
3900              * just as in 64-bit mode.
3901              */
3902             mod = (modrm >> 6) & 3;
3903             ot = (mod != 3 ? MO_16 : s->dflag);
3904             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
3905             break;
3906         case 0xee: /* rdpkru */
3907             if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3908                              | PREFIX_REPZ | PREFIX_REPNZ)) {
3909                 goto illegal_op;
3910             }
3911             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3912             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3913             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3914             break;
3915         case 0xef: /* wrpkru */
3916             if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3917                              | PREFIX_REPZ | PREFIX_REPNZ)) {
3918                 goto illegal_op;
3919             }
3920             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3921                                   cpu_regs[R_EDX]);
3922             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3923             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3924             break;
3925 
3926         CASE_MODRM_OP(6): /* lmsw */
3927             if (!check_cpl0(s)) {
3928                 break;
3929             }
3930             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3931             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3932             /*
3933              * Only the low 4 bits of CR0 are modified.
3934              * PE cannot be cleared once it has been set.
3935              */
3936             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
3937             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3938             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
3939             tcg_gen_or_tl(s->T0, s->T0, s->T1);
3940             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3941             s->base.is_jmp = DISAS_EOB_NEXT;
3942             break;
3943 
3944         CASE_MODRM_MEM_OP(7): /* invlpg */
3945             if (!check_cpl0(s)) {
3946                 break;
3947             }
3948             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3949             gen_lea_modrm(env, s, modrm);
3950             gen_helper_flush_page(tcg_env, s->A0);
3951             s->base.is_jmp = DISAS_EOB_NEXT;
3952             break;
3953 
3954         case 0xf8: /* swapgs */
3955 #ifdef TARGET_X86_64
3956             if (CODE64(s)) {
3957                 if (check_cpl0(s)) {
3958                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3959                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3960                                   offsetof(CPUX86State, kernelgsbase));
3961                     tcg_gen_st_tl(s->T0, tcg_env,
3962                                   offsetof(CPUX86State, kernelgsbase));
3963                 }
3964                 break;
3965             }
3966 #endif
3967             goto illegal_op;
3968 
3969         case 0xf9: /* rdtscp */
3970             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3971                 goto illegal_op;
3972             }
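                 /* RDTSCP is RDTSC followed by a read of IA32_TSC_AUX into
                    ECX; implement it by chaining the rdtsc and rdpid helpers.  */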
3973             gen_update_cc_op(s);
3974             gen_update_eip_cur(s);
3975             translator_io_start(&s->base);
3976             gen_helper_rdtsc(tcg_env);
3977             gen_helper_rdpid(s->T0, tcg_env);
3978             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3979             break;
3980 
3981         default:
3982             goto unknown_op;
3983         }
3984         break;
3985 
3986     case 0x108: /* invd */
3987     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
3988         if (check_cpl0(s)) {
3989             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
3990             /* nothing to do */
3991         }
3992         break;
3993     case 0x102: /* lar */
3994     case 0x103: /* lsl */
3995         {
3996             TCGLabel *label1;
3997             TCGv t0;
3998             if (!PE(s) || VM86(s))
3999                 goto illegal_op;
4000             ot = dflag != MO_16 ? MO_32 : MO_16;
4001             modrm = x86_ldub_code(env, s);
4002             reg = ((modrm >> 3) & 7) | REX_R(s);
4003             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4004             t0 = tcg_temp_new();
4005             gen_update_cc_op(s);
4006             if (b == 0x102) {
4007                 gen_helper_lar(t0, tcg_env, s->T0);
4008             } else {
4009                 gen_helper_lsl(t0, tcg_env, s->T0);
4010             }
4011             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
4012             label1 = gen_new_label();
4013             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
4014             gen_op_mov_reg_v(s, ot, reg, t0);
4015             gen_set_label(label1);
4016             set_cc_op(s, CC_OP_EFLAGS);
4017         }
4018         break;
4019     case 0x11a:
4020         modrm = x86_ldub_code(env, s);
4021         if (s->flags & HF_MPX_EN_MASK) {
4022             mod = (modrm >> 6) & 3;
4023             reg = ((modrm >> 3) & 7) | REX_R(s);
4024             if (prefixes & PREFIX_REPZ) {
4025                 /* bndcl */
4026                 if (reg >= 4
4027                     || (prefixes & PREFIX_LOCK)
4028                     || s->aflag == MO_16) {
4029                     goto illegal_op;
4030                 }
4031                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
4032             } else if (prefixes & PREFIX_REPNZ) {
4033                 /* bndcu */
4034                 if (reg >= 4
4035                     || (prefixes & PREFIX_LOCK)
4036                     || s->aflag == MO_16) {
4037                     goto illegal_op;
4038                 }
4039                 TCGv_i64 notu = tcg_temp_new_i64();
4040                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
4041                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
4042             } else if (prefixes & PREFIX_DATA) {
4043                 /* bndmov -- from reg/mem */
4044                 if (reg >= 4 || s->aflag == MO_16) {
4045                     goto illegal_op;
4046                 }
4047                 if (mod == 3) {
4048                     int reg2 = (modrm & 7) | REX_B(s);
4049                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
4050                         goto illegal_op;
4051                     }
4052                     if (s->flags & HF_MPX_IU_MASK) {
4053                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
4054                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
4055                     }
4056                 } else {
4057                     gen_lea_modrm(env, s, modrm);
4058                     if (CODE64(s)) {
4059                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
4060                                             s->mem_index, MO_LEUQ);
4061                         tcg_gen_addi_tl(s->A0, s->A0, 8);
4062                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
4063                                             s->mem_index, MO_LEUQ);
4064                     } else {
4065                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
4066                                             s->mem_index, MO_LEUL);
4067                         tcg_gen_addi_tl(s->A0, s->A0, 4);
4068                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
4069                                             s->mem_index, MO_LEUL);
4070                     }
4071                     /* bnd registers are now in use */
4072                     gen_set_hflag(s, HF_MPX_IU_MASK);
4073                 }
4074             } else if (mod != 3) {
4075                 /* bndldx */
4076                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4077                 if (reg >= 4
4078                     || (prefixes & PREFIX_LOCK)
4079                     || s->aflag == MO_16
4080                     || a.base < -1) {
4081                     goto illegal_op;
4082                 }
4083                 if (a.base >= 0) {
4084                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
4085                 } else {
4086                     tcg_gen_movi_tl(s->A0, 0);
4087                 }
4088                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
4089                 if (a.index >= 0) {
4090                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
4091                 } else {
4092                     tcg_gen_movi_tl(s->T0, 0);
4093                 }
4094                 if (CODE64(s)) {
4095                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
4096                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
4097                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
4098                 } else {
4099                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
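                         /* The 32-bit helper packs both bounds into one
                            64-bit value: lower bound in bits 31:0, upper
                            bound in bits 63:32; unpack into bndl/bndu.  */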
4100                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
4101                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
4102                 }
4103                 gen_set_hflag(s, HF_MPX_IU_MASK);
4104             }
4105         }
4106         gen_nop_modrm(env, s, modrm);
4107         break;
4108     case 0x11b:
4109         modrm = x86_ldub_code(env, s);
4110         if (s->flags & HF_MPX_EN_MASK) {
4111             mod = (modrm >> 6) & 3;
4112             reg = ((modrm >> 3) & 7) | REX_R(s);
4113             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
4114                 /* bndmk */
4115                 if (reg >= 4
4116                     || (prefixes & PREFIX_LOCK)
4117                     || s->aflag == MO_16) {
4118                     goto illegal_op;
4119                 }
4120                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4121                 if (a.base >= 0) {
4122                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
4123                     if (!CODE64(s)) {
4124                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
4125                     }
4126                 } else if (a.base == -1) {
4127                     /* no base register: the lower bound is 0 */
4128                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
4129                 } else {
4130                     /* rip-relative generates #ud */
4131                     goto illegal_op;
4132                 }
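                     /* As in hardware, BNDMK stores the upper bound in one's
                        complement form (BNDCU re-complements it when checking,
                        while BNDCN checks the raw value).  */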
4133                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
4134                 if (!CODE64(s)) {
4135                     tcg_gen_ext32u_tl(s->A0, s->A0);
4136                 }
4137                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
4138                 /* bnd registers are now in use */
4139                 gen_set_hflag(s, HF_MPX_IU_MASK);
4140                 break;
4141             } else if (prefixes & PREFIX_REPNZ) {
4142                 /* bndcn */
4143                 if (reg >= 4
4144                     || (prefixes & PREFIX_LOCK)
4145                     || s->aflag == MO_16) {
4146                     goto illegal_op;
4147                 }
4148                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
4149             } else if (prefixes & PREFIX_DATA) {
4150                 /* bndmov -- to reg/mem */
4151                 if (reg >= 4 || s->aflag == MO_16) {
4152                     goto illegal_op;
4153                 }
4154                 if (mod == 3) {
4155                     int reg2 = (modrm & 7) | REX_B(s);
4156                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
4157                         goto illegal_op;
4158                     }
4159                     if (s->flags & HF_MPX_IU_MASK) {
4160                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
4161                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
4162                     }
4163                 } else {
4164                     gen_lea_modrm(env, s, modrm);
4165                     if (CODE64(s)) {
4166                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
4167                                             s->mem_index, MO_LEUQ);
4168                         tcg_gen_addi_tl(s->A0, s->A0, 8);
4169                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
4170                                             s->mem_index, MO_LEUQ);
4171                     } else {
4172                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
4173                                             s->mem_index, MO_LEUL);
4174                         tcg_gen_addi_tl(s->A0, s->A0, 4);
4175                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
4176                                             s->mem_index, MO_LEUL);
4177                     }
4178                 }
4179             } else if (mod != 3) {
4180                 /* bndstx */
4181                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4182                 if (reg >= 4
4183                     || (prefixes & PREFIX_LOCK)
4184                     || s->aflag == MO_16
4185                     || a.base < -1) {
4186                     goto illegal_op;
4187                 }
4188                 if (a.base >= 0) {
4189                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
4190                 } else {
4191                     tcg_gen_movi_tl(s->A0, 0);
4192                 }
4193                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
4194                 if (a.index >= 0) {
4195                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
4196                 } else {
4197                     tcg_gen_movi_tl(s->T0, 0);
4198                 }
4199                 if (CODE64(s)) {
4200                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
4201                                         cpu_bndl[reg], cpu_bndu[reg]);
4202                 } else {
4203                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
4204                                         cpu_bndl[reg], cpu_bndu[reg]);
4205                 }
4206             }
4207         }
4208         gen_nop_modrm(env, s, modrm);
4209         break;
4210 
4211     case 0x120: /* mov reg, crN */
4212     case 0x122: /* mov crN, reg */
4213         if (!check_cpl0(s)) {
4214             break;
4215         }
4216         modrm = x86_ldub_code(env, s);
4217         /*
4218          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
4219          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
4220          * processors all show that the mod bits are assumed to be 1's,
4221          * regardless of actual values.
4222          */
4223         rm = (modrm & 7) | REX_B(s);
4224         reg = ((modrm >> 3) & 7) | REX_R(s);
4225         switch (reg) {
4226         case 0:
4227             if ((prefixes & PREFIX_LOCK) &&
4228                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
4229                 reg = 8;
4230             }
4231             break;
4232         case 2:
4233         case 3:
4234         case 4:
4235         case 8:
4236             break;
4237         default:
4238             goto unknown_op;
4239         }
4240         ot  = (CODE64(s) ? MO_64 : MO_32);
4241 
4242         translator_io_start(&s->base);
4243         if (b & 2) {
4244             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
4245             gen_op_mov_v_reg(s, ot, s->T0, rm);
4246             gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
4247             s->base.is_jmp = DISAS_EOB_NEXT;
4248         } else {
4249             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
4250             gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
4251             gen_op_mov_reg_v(s, ot, rm, s->T0);
4252         }
4253         break;
4254 
4255     case 0x121: /* mov reg, drN */
4256     case 0x123: /* mov drN, reg */
4257         if (check_cpl0(s)) {
4258             modrm = x86_ldub_code(env, s);
4259             /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
4260              * AMD documentation (24594.pdf) and testing of
4261              * Intel 386 and 486 processors all show that the mod bits
4262              * are assumed to be 1's, regardless of actual values.
4263              */
4264             rm = (modrm & 7) | REX_B(s);
4265             reg = ((modrm >> 3) & 7) | REX_R(s);
4266             if (CODE64(s))
4267                 ot = MO_64;
4268             else
4269                 ot = MO_32;
4270             if (reg >= 8) {
4271                 goto illegal_op;
4272             }
4273             if (b & 2) {
4274                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
4275                 gen_op_mov_v_reg(s, ot, s->T0, rm);
4276                 tcg_gen_movi_i32(s->tmp2_i32, reg);
4277                 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
4278                 s->base.is_jmp = DISAS_EOB_NEXT;
4279             } else {
4280                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
4281                 tcg_gen_movi_i32(s->tmp2_i32, reg);
4282                 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
4283                 gen_op_mov_reg_v(s, ot, rm, s->T0);
4284             }
4285         }
4286         break;
4287     case 0x106: /* clts */
4288         if (check_cpl0(s)) {
4289             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
4290             gen_helper_clts(tcg_env);
4291             /* abort block because static cpu state changed */
4292             s->base.is_jmp = DISAS_EOB_NEXT;
4293         }
4294         break;
4295     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
4296     case 0x1ae:
4297         modrm = x86_ldub_code(env, s);
4298         switch (modrm) {
4299         CASE_MODRM_MEM_OP(0): /* fxsave */
4300             if (!(s->cpuid_features & CPUID_FXSR)
4301                 || (prefixes & PREFIX_LOCK)) {
4302                 goto illegal_op;
4303             }
4304             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
4305                 gen_exception(s, EXCP07_PREX);
4306                 break;
4307             }
4308             gen_lea_modrm(env, s, modrm);
4309             gen_helper_fxsave(tcg_env, s->A0);
4310             break;
4311 
4312         CASE_MODRM_MEM_OP(1): /* fxrstor */
4313             if (!(s->cpuid_features & CPUID_FXSR)
4314                 || (prefixes & PREFIX_LOCK)) {
4315                 goto illegal_op;
4316             }
4317             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
4318                 gen_exception(s, EXCP07_PREX);
4319                 break;
4320             }
4321             gen_lea_modrm(env, s, modrm);
4322             gen_helper_fxrstor(tcg_env, s->A0);
4323             break;
4324 
4325         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
4326             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
4327                 goto illegal_op;
4328             }
4329             if (s->flags & HF_TS_MASK) {
4330                 gen_exception(s, EXCP07_PREX);
4331                 break;
4332             }
4333             gen_lea_modrm(env, s, modrm);
4334             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
4335             gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
4336             break;
4337 
4338         CASE_MODRM_MEM_OP(3): /* stmxcsr */
4339             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
4340                 goto illegal_op;
4341             }
4342             if (s->flags & HF_TS_MASK) {
4343                 gen_exception(s, EXCP07_PREX);
4344                 break;
4345             }
4346             gen_helper_update_mxcsr(tcg_env);
4347             gen_lea_modrm(env, s, modrm);
4348             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
4349             gen_op_st_v(s, MO_32, s->T0, s->A0);
4350             break;
4351 
4352         CASE_MODRM_MEM_OP(4): /* xsave */
4353             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
4354                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
4355                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
4356                 goto illegal_op;
4357             }
4358             gen_lea_modrm(env, s, modrm);
4359             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
4360                                   cpu_regs[R_EDX]);
4361             gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
4362             break;
4363 
4364         CASE_MODRM_MEM_OP(5): /* xrstor */
4365             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
4366                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
4367                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
4368                 goto illegal_op;
4369             }
4370             gen_lea_modrm(env, s, modrm);
4371             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
4372                                   cpu_regs[R_EDX]);
4373             gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
4374             /* XRSTOR is how MPX is enabled, which changes how
4375                we translate.  Thus we need to end the TB.  */
4376             s->base.is_jmp = DISAS_EOB_NEXT;
4377             break;
4378 
4379         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
4380             if (prefixes & PREFIX_LOCK) {
4381                 goto illegal_op;
4382             }
4383             if (prefixes & PREFIX_DATA) {
4384                 /* clwb */
4385                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
4386                     goto illegal_op;
4387                 }
4388                 gen_nop_modrm(env, s, modrm);
4389             } else {
4390                 /* xsaveopt */
4391                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
4392                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
4393                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
4394                     goto illegal_op;
4395                 }
4396                 gen_lea_modrm(env, s, modrm);
4397                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
4398                                       cpu_regs[R_EDX]);
4399                 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
4400             }
4401             break;
4402 
4403         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
4404             if (prefixes & PREFIX_LOCK) {
4405                 goto illegal_op;
4406             }
4407             if (prefixes & PREFIX_DATA) {
4408                 /* clflushopt */
4409                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
4410                     goto illegal_op;
4411                 }
4412             } else {
4413                 /* clflush */
4414                 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
4415                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
4416                     goto illegal_op;
4417                 }
4418             }
4419             gen_nop_modrm(env, s, modrm);
4420             break;
4421 
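        /*
         * TCG does not model the data cache, so CLWB, CLFLUSH and
         * CLFLUSHOPT need their operand decoded but not acted upon:
         * gen_nop_modrm() walks the modrm/SIB/displacement bytes so
         * that the instruction length comes out right, yet emits no
         * memory access.
         */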
4422         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
4423         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
4424         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
4425         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
4426             if (CODE64(s)
4427                 && (prefixes & PREFIX_REPZ)
4428                 && !(prefixes & PREFIX_LOCK)
4429                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
4430                 TCGv base, treg, src, dst;
4431 
4432                 /* Preserve hflags bits by testing CR4 at runtime.  */
4433                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
4434                 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
4435 
4436                 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
4437                 treg = cpu_regs[(modrm & 7) | REX_B(s)];
4438 
4439                 if (modrm & 0x10) {
4440                     /* wr*base */
4441                     dst = base, src = treg;
4442                 } else {
4443                     /* rd*base */
4444                     dst = treg, src = base;
4445                 }
4446 
4447                 if (s->dflag == MO_32) {
4448                     tcg_gen_ext32u_tl(dst, src);
4449                 } else {
4450                     tcg_gen_mov_tl(dst, src);
4451                 }
4452                 break;
4453             }
4454             goto unknown_op;
4455 
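        /*
         * RD/WR{FS,GS}BASE are legal only when CR4.FSGSBASE is set,
         * but that bit is deliberately not cached in hflags (so as not
         * to spend an hflags bit on it).  The check is instead deferred
         * to run time: gen_helper_cr4_testbit() raises #UD from inside
         * the helper if the requested CR4 bit is clear, and falls
         * through otherwise.  The emitted sequence, schematically:
         *
         *     tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
         *     gen_helper_cr4_testbit(tcg_env, s->tmp2_i32); // #UD if clear
         *     // ...then a plain move between seg base and GPR...
         */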
4456         case 0xf8 ... 0xff: /* sfence */
4457             if (!(s->cpuid_features & CPUID_SSE)
4458                 || (prefixes & PREFIX_LOCK)) {
4459                 goto illegal_op;
4460             }
4461             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
4462             break;
4463         case 0xe8 ... 0xef: /* lfence */
4464             if (!(s->cpuid_features & CPUID_SSE)
4465                 || (prefixes & PREFIX_LOCK)) {
4466                 goto illegal_op;
4467             }
4468             tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
4469             break;
4470         case 0xf0 ... 0xf7: /* mfence */
4471             if (!(s->cpuid_features & CPUID_SSE2)
4472                 || (prefixes & PREFIX_LOCK)) {
4473                 goto illegal_op;
4474             }
4475             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
4476             break;
4477 
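        /*
         * The three x86 fences map directly onto TCG's memory-barrier
         * op; only the ordered access kinds differ:
         *
         *     sfence -> tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);  // st/st
         *     lfence -> tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);  // ld/ld
         *     mfence -> tcg_gen_mb(TCG_MO_ALL   | TCG_BAR_SC);  // all
         *
         * The host backend then emits a real barrier instruction only
         * where the host memory model actually needs one.
         */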
4478         default:
4479             goto unknown_op;
4480         }
4481         break;
4482 
4483     case 0x1aa: /* rsm */
4484         gen_svm_check_intercept(s, SVM_EXIT_RSM);
4485         if (!(s->flags & HF_SMM_MASK))
4486             goto illegal_op;
4487 #ifdef CONFIG_USER_ONLY
4488         /* we should not be in SMM mode */
4489         g_assert_not_reached();
4490 #else
4491         gen_update_cc_op(s);
4492         gen_update_eip_next(s);
4493         gen_helper_rsm(tcg_env);
4494 #endif /* CONFIG_USER_ONLY */
4495         s->base.is_jmp = DISAS_EOB_ONLY;
4496         break;
4497     case 0x1b8: /* SSE4.2 popcnt */
4498         if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
4499              PREFIX_REPZ)
4500             goto illegal_op;
4501         if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
4502             goto illegal_op;
4503 
4504         modrm = x86_ldub_code(env, s);
4505         reg = ((modrm >> 3) & 7) | REX_R(s);
4506 
4507         ot = dflag;
4508         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4509         gen_extu(ot, s->T0);
4510         tcg_gen_mov_tl(cpu_cc_src, s->T0);
4511         tcg_gen_ctpop_tl(s->T0, s->T0);
4512         gen_op_mov_reg_v(s, ot, reg, s->T0);
4513 
4514         set_cc_op(s, CC_OP_POPCNT);
4515         break;
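        /*
         * Note the lazy-flags idiom above: instead of computing EFLAGS
         * now, the translator stashes POPCNT's (zero-extended) source
         * in cc_src and records CC_OP_POPCNT.  Flags are materialised
         * from (cc_op, cc_src) only if a later instruction reads them;
         * POPCNT defines ZF = (src == 0) and clears the rest, so e.g.:
         *
         *     // `zf` is a hypothetical temporary
         *     tcg_gen_setcondi_tl(TCG_COND_EQ, zf, cpu_cc_src, 0);
         */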
4516     default:
4517         g_assert_not_reached();
4518     }
4519     return;
4520  illegal_op:
4521     gen_illegal_opcode(s);
4522     return;
4523  unknown_op:
4524     gen_unknown_opcode(env, s);
4525 }
4526 
4527 #include "decode-new.h"
4528 #include "emit.c.inc"
4529 #include "decode-new.c.inc"
4530 
4531 void tcg_x86_init(void)
4532 {
4533     static const char reg_names[CPU_NB_REGS][4] = {
4534 #ifdef TARGET_X86_64
4535         [R_EAX] = "rax",
4536         [R_EBX] = "rbx",
4537         [R_ECX] = "rcx",
4538         [R_EDX] = "rdx",
4539         [R_ESI] = "rsi",
4540         [R_EDI] = "rdi",
4541         [R_EBP] = "rbp",
4542         [R_ESP] = "rsp",
4543         [8]  = "r8",
4544         [9]  = "r9",
4545         [10] = "r10",
4546         [11] = "r11",
4547         [12] = "r12",
4548         [13] = "r13",
4549         [14] = "r14",
4550         [15] = "r15",
4551 #else
4552         [R_EAX] = "eax",
4553         [R_EBX] = "ebx",
4554         [R_ECX] = "ecx",
4555         [R_EDX] = "edx",
4556         [R_ESI] = "esi",
4557         [R_EDI] = "edi",
4558         [R_EBP] = "ebp",
4559         [R_ESP] = "esp",
4560 #endif
4561     };
4562     static const char eip_name[] = {
4563 #ifdef TARGET_X86_64
4564         "rip"
4565 #else
4566         "eip"
4567 #endif
4568     };
4569     static const char seg_base_names[6][8] = {
4570         [R_CS] = "cs_base",
4571         [R_DS] = "ds_base",
4572         [R_ES] = "es_base",
4573         [R_FS] = "fs_base",
4574         [R_GS] = "gs_base",
4575         [R_SS] = "ss_base",
4576     };
4577     static const char bnd_regl_names[4][8] = {
4578         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
4579     };
4580     static const char bnd_regu_names[4][8] = {
4581         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
4582     };
4583     int i;
4584 
4585     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
4586                                        offsetof(CPUX86State, cc_op), "cc_op");
4587     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
4588                                     "cc_dst");
4589     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
4590                                     "cc_src");
4591     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
4592                                      "cc_src2");
4593     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
4594 
4595     for (i = 0; i < CPU_NB_REGS; ++i) {
4596         cpu_regs[i] = tcg_global_mem_new(tcg_env,
4597                                          offsetof(CPUX86State, regs[i]),
4598                                          reg_names[i]);
4599     }
4600 
4601     for (i = 0; i < 6; ++i) {
4602         cpu_seg_base[i]
4603             = tcg_global_mem_new(tcg_env,
4604                                  offsetof(CPUX86State, segs[i].base),
4605                                  seg_base_names[i]);
4606     }
4607 
4608     for (i = 0; i < 4; ++i) {
4609         cpu_bndl[i]
4610             = tcg_global_mem_new_i64(tcg_env,
4611                                      offsetof(CPUX86State, bnd_regs[i].lb),
4612                                      bnd_regl_names[i]);
4613         cpu_bndu[i]
4614             = tcg_global_mem_new_i64(tcg_env,
4615                                      offsetof(CPUX86State, bnd_regs[i].ub),
4616                                      bnd_regu_names[i]);
4617     }
4618 }
4619 
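/*
 * Each tcg_global_mem_new*() call above declares a TCG "global": a
 * named value living at a fixed offset inside CPUX86State that
 * generated code can use like any other TCG value, with the loads and
 * stores to memory inserted automatically around helper calls and TB
 * exits.  Once initialized, incrementing the guest accumulator is
 * simply:
 *
 *     tcg_gen_addi_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], 1);
 *
 * and the register allocator decides when env->regs[R_EAX] is really
 * written back.
 */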
4620 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
4621 {
4622     DisasContext *dc = container_of(dcbase, DisasContext, base);
4623     CPUX86State *env = cpu_env(cpu);
4624     uint32_t flags = dc->base.tb->flags;
4625     uint32_t cflags = tb_cflags(dc->base.tb);
4626     int cpl = (flags >> HF_CPL_SHIFT) & 3;
4627     int iopl = (flags >> IOPL_SHIFT) & 3;
4628 
4629     dc->cs_base = dc->base.tb->cs_base;
4630     dc->pc_save = dc->base.pc_next;
4631     dc->flags = flags;
4632 #ifndef CONFIG_USER_ONLY
4633     dc->cpl = cpl;
4634     dc->iopl = iopl;
4635 #endif
4636 
4637     /* We make some simplifying assumptions; validate they're correct. */
4638     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
4639     g_assert(CPL(dc) == cpl);
4640     g_assert(IOPL(dc) == iopl);
4641     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
4642     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
4643     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
4644     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
4645     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
4646     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
4647     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
4648     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
4649 
4650     dc->cc_op = CC_OP_DYNAMIC;
4651     dc->cc_op_dirty = false;
4652     /* select memory access functions */
4653     dc->mem_index = cpu_mmu_index(cpu, false);
4654     dc->cpuid_features = env->features[FEAT_1_EDX];
4655     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
4656     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
4657     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
4658     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
4659     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
4660     dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
4661     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
4662     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
4663                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
4664     /*
4665      * If jmp_opt is set, we want to handle each string instruction
4666      * individually.  For icount, also disable the repz optimization
4667      * so that each iteration is accounted separately.
4668      */
4669     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
4670 
4671     dc->T0 = tcg_temp_new();
4672     dc->T1 = tcg_temp_new();
4673     dc->A0 = tcg_temp_new();
4674 
4675     dc->tmp0 = tcg_temp_new();
4676     dc->tmp1_i64 = tcg_temp_new_i64();
4677     dc->tmp2_i32 = tcg_temp_new_i32();
4678     dc->tmp3_i32 = tcg_temp_new_i32();
4679     dc->tmp4 = tcg_temp_new();
4680     dc->cc_srcT = tcg_temp_new();
4681 }
4682 
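/*
 * Everything captured above is constant for the lifetime of the TB:
 * the CPUID feature words and tb->flags are read once here, so the
 * per-instruction legality checks in the decoder are plain host-side
 * C conditionals and emit no guest code.  The flip side is that a TB
 * is only valid for the flags it was built with, which is why
 * instructions that may change hflags end the TB with DISAS_EOB_NEXT.
 */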
4683 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
4684 {
4685 }
4686 
4687 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
4688 {
4689     DisasContext *dc = container_of(dcbase, DisasContext, base);
4690     target_ulong pc_arg = dc->base.pc_next;
4691 
4692     dc->prev_insn_start = dc->base.insn_start;
4693     dc->prev_insn_end = tcg_last_op();
4694     if (tb_cflags(dcbase->tb) & CF_PCREL) {
4695         pc_arg &= ~TARGET_PAGE_MASK;
4696     }
4697     tcg_gen_insn_start(pc_arg, dc->cc_op);
4698 }
4699 
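/*
 * With CF_PCREL the generated code must not bake absolute guest PCs
 * into the TB, so that the same TB can be reused wherever the code is
 * mapped; only the offset within the page is recorded here.  On
 * restore, the full PC is rebuilt from the page currently being
 * executed plus that offset -- schematically, not the exact source:
 *
 *     pc = page_base | pc_arg;    // `page_base` is hypothetical here
 */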
4700 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
4701 {
4702     DisasContext *dc = container_of(dcbase, DisasContext, base);
4703     bool orig_cc_op_dirty = dc->cc_op_dirty;
4704     CCOp orig_cc_op = dc->cc_op;
4705     target_ulong orig_pc_save = dc->pc_save;
4706 
4707 #ifdef TARGET_VSYSCALL_PAGE
4708     /*
4709      * Detect entry into the vsyscall page and invoke the syscall.
4710      */
4711     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
4712         gen_exception(dc, EXCP_VSYSCALL);
4713         dc->base.pc_next = dc->pc + 1;
4714         return;
4715     }
4716 #endif
4717 
4718     switch (sigsetjmp(dc->jmpbuf, 0)) {
4719     case 0:
4720         disas_insn(dc, cpu);
4721         break;
4722     case 1:
4723         gen_exception_gpf(dc);
4724         break;
4725     case 2:
4726         /* Restore state that may affect the next instruction. */
4727         dc->pc = dc->base.pc_next;
4728         /*
4729          * TODO: These save/restore can be removed after the table-based
4730          * decoder is complete; we will be decoding the insn completely
4731          * before any code generation that might affect these variables.
4732          */
4733         dc->cc_op_dirty = orig_cc_op_dirty;
4734         dc->cc_op = orig_cc_op;
4735         dc->pc_save = orig_pc_save;
4736         /* END TODO */
4737         dc->base.num_insns--;
4738         tcg_remove_ops_after(dc->prev_insn_end);
4739         dc->base.insn_start = dc->prev_insn_start;
4740         dc->base.is_jmp = DISAS_TOO_MANY;
4741         return;
4742     default:
4743         g_assert_not_reached();
4744     }
4745 
4746     /*
4747      * Instruction decoding completed (possibly with #GP if the
4748      * 15-byte boundary was exceeded).
4749      */
4750     dc->base.pc_next = dc->pc;
4751     if (dc->base.is_jmp == DISAS_NEXT) {
4752         if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
4753             /*
4754              * In single-step mode, we generate only one instruction
4755              * and then raise an exception.
4756              * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we
4757              * clear the flag and end the translation to give the
4758              * IRQs a chance to be taken.
4759              */
4760             dc->base.is_jmp = DISAS_EOB_NEXT;
4761         } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
4762             dc->base.is_jmp = DISAS_TOO_MANY;
4763         }
4764     }
4765 }
4766 
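/*
 * The sigsetjmp() above gives the decoder a way to abort
 * mid-instruction without manual unwinding: the code-fetch routines
 * (e.g. x86_ldub_code()) siglongjmp() back with value 1 when the
 * 15-byte instruction-length limit is exceeded, which becomes #GP, or
 * with value 2 when the fetch cannot continue (e.g. it would cross
 * into a page that cannot be translated now).  In the latter case
 * every op emitted for the half-decoded instruction is discarded with
 * tcg_remove_ops_after() and the TB is ended just before it instead.
 */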
4767 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
4768 {
4769     DisasContext *dc = container_of(dcbase, DisasContext, base);
4770 
4771     switch (dc->base.is_jmp) {
4772     case DISAS_NORETURN:
4773         break;
4774     case DISAS_TOO_MANY:
4775         gen_update_cc_op(dc);
4776         gen_jmp_rel_csize(dc, 0, 0);
4777         break;
4778     case DISAS_EOB_NEXT:
4779         gen_update_cc_op(dc);
4780         gen_update_eip_cur(dc);
4781         /* fall through */
4782     case DISAS_EOB_ONLY:
4783         gen_eob(dc);
4784         break;
4785     case DISAS_EOB_INHIBIT_IRQ:
4786         gen_update_cc_op(dc);
4787         gen_update_eip_cur(dc);
4788         gen_eob_inhibit_irq(dc);
4789         break;
4790     case DISAS_JUMP:
4791         gen_jr(dc);
4792         break;
4793     default:
4794         g_assert_not_reached();
4795     }
4796 }
4797 
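/*
 * Summary of the dispositions handled above:
 *
 *     DISAS_NORETURN        exception already raised; emit nothing.
 *     DISAS_TOO_MANY        TB limit hit or page crossed; goto_tb to
 *                           the next instruction.
 *     DISAS_EOB_NEXT        sync cc_op and EIP, then exit the TB.
 *     DISAS_EOB_ONLY        EIP already up to date; just exit.
 *     DISAS_EOB_INHIBIT_IRQ exit while keeping interrupts inhibited
 *                           for one more instruction (MOV/POP SS, STI).
 *     DISAS_JUMP            EIP holds a dynamic target; indirect jump
 *                           via the TB lookup path.
 */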
4798 static const TranslatorOps i386_tr_ops = {
4799     .init_disas_context = i386_tr_init_disas_context,
4800     .tb_start           = i386_tr_tb_start,
4801     .insn_start         = i386_tr_insn_start,
4802     .translate_insn     = i386_tr_translate_insn,
4803     .tb_stop            = i386_tr_tb_stop,
4804 };
4805 
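/*
 * The common translator core drives these hooks in a fixed order; the
 * front end above reduces, in pseudo-code, to:
 *
 *     init_disas_context(db, cpu);
 *     tb_start(db, cpu);
 *     do {
 *         insn_start(db, cpu);
 *         translate_insn(db, cpu);    // may change db->is_jmp
 *     } while (db->is_jmp == DISAS_NEXT && --insns_left > 0);
 *     tb_stop(db, cpu);
 *
 * The real loop lives in translator_loop() and additionally handles
 * icount, breakpoints and plugin instrumentation.
 */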
4806 /* generate intermediate code for basic block 'tb'.  */
4807 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
4808                            vaddr pc, void *host_pc)
4809 {
4810     DisasContext dc;
4811 
4812     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
4813 }
4814