/* xref: /qemu/target/i386/tcg/translate.c (revision f2c04bed) */
/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Fixes for Windows namespace pollution.  */
#undef IN
#undef OUT

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
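/*
 * Worked expansion (annotation, not in the original source):
 * CASE_MODRM_MEM_OP(7) matches ModRM bytes 0x38-0x3f, 0x78-0x7f and
 * 0xb8-0xbf (mod = 0, 1, 2 with reg/op field 7); CASE_MODRM_OP(7)
 * additionally matches the register forms 0xf8-0xff (mod = 3).
 */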

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;

/*
 * Point EIP to next instruction before ending translation.
 * For instructions that can change hflags.
 */
#define DISAS_EOB_NEXT         DISAS_TARGET_0

/*
 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
 * already set.  For instructions that activate interrupt shadow.
 */
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1

/*
 * Return to the main loop; EIP might have already been updated
 * but even in that case do not use lookup_and_goto_ptr().
 */
#define DISAS_EOB_ONLY         DISAS_TARGET_2

/*
 * EIP has already been updated.  For jumps that wish to use
 * lookup_and_goto_ptr().
 */
#define DISAS_JUMP             DISAS_TARGET_3

/*
 * EIP has already been updated.  Use updated value of
 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
 */
#define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_exception_gpf(DisasContext *s);

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
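/*
 * Worked example (added for clarity): when the translator moves from
 * CC_OP_ADCOX (DST, SRC and SRC2 live) to CC_OP_LOGICB (only DST live),
 * set_cc_op_1() below computes dead = (DST|SRC|SRC2) & ~DST, so
 * cpu_cc_src and cpu_cc_src2 can be discarded rather than kept alive
 * to the end of the TB.
 */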

static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (dirty && s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_discard_i32(cpu_cc_op);
    }
    s->cc_op_dirty = dirty;
    s->cc_op = op;
}

static void set_cc_op(DisasContext *s, CCOp op)
{
    /*
     * The DYNAMIC setting is translator only, everything else
     * will be spilled later.
     */
    set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
}

static void assume_cc_op(DisasContext *s, CCOp op)
{
    set_cc_op_1(s, op, false);
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
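/*
 * Example (annotation): register number 4 in a byte operand means AH
 * when no REX prefix is present, but the same number with any REX
 * prefix selects SPL, the low byte of RSP; likewise 5..7 switch from
 * CH/DH/BH to BPL/SIL/DIL.
 */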

/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
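/*
 * Illustration (added): a MO_8 write to register 7 without REX deposits
 * the value into bits 15:8 of cpu_regs[3], since BH is the high byte of
 * EBX; a MO_32 write on x86-64 zero-extends, implementing the
 * architectural clearing of the destination's upper 32 bits.
 */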

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}
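/*
 * Note added for clarity: with CF_PCREL translation blocks the three
 * helpers above avoid baking absolute addresses into generated code.
 * pc_save records the linear PC value that cpu_eip is known to match,
 * so the current or next EIP is produced as cpu_eip plus a block-local
 * displacement instead of as an immediate constant.
 */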

/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}
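/*
 * Example (annotation): a 16-bit access through BX with ADDSEG set
 * computes dest = zext16(BX), adds the DS segment base, and truncates
 * the sum to 32 bits, matching real-mode address wraparound.
 */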

static void gen_lea_v_seg(DisasContext *s, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
}

static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
};

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
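/*
 * Added explanation: a CCPrepare describes a condition as
 * cond(reg, reg2) when use_reg2 is set, or cond(reg, imm) otherwise,
 * so consumers such as gen_setcc1() and gen_jcc1() can emit a single
 * setcond or brcond.  no_setcond marks cases where reg already holds
 * the truth value and only needs a move (or an xor with 1 when the
 * condition is inverted).
 */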

static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
{
    if (size == MO_TL) {
        return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
    } else {
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
                             .imm = 1ull << ((8 << size) - 1) };
    }
}
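/* For MO_8, for instance, this yields a TSTNE test against 0x80:
   "sign set iff bit 7 of src is nonzero" (added note). */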

/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       if (!reg) {
           reg = tcg_temp_new();
       }
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .no_setcond = true };
    }
}

/* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}

/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            return gen_prepare_sign_nz(cpu_cc_dst, size);
        }
    }
}

/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}

/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            if (size == MO_TL) {
                return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
            } else {
                return (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = cpu_cc_dst,
                                     .imm = (1ull << (8 << size)) - 1 };
            }
        }
    }
}

/* return how to compute jump opcode 'b'.  'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
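/*
 * Encoding reminder (annotation): B is the low nibble of the Jcc/SETcc
 * opcode, so 0x75 (JNZ) gives b = 5, hence jcc_op = JCC_Z and inv = 1:
 * prepare the Z condition, then invert it.
 */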

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.  The caller can jump to the returned label
   to stop the REP but, if the flags have changed, it has to call
   gen_update_cc_op before doing so.  */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_update_cc_op(s);
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

static void gen_repz_nz(DisasContext *s, MemOp ot,
                        void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    int nz = (s->prefix & PREFIX_REPNZ) ? 1 : 0;

    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /*
     * Only one iteration is done at a time, so the translation
     * block ends unconditionally after this instruction and there
     * is no control flow junction - no need to set CC_OP_DYNAMIC.
     */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
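/*
 * Rationale (added): a shift by a runtime count of zero must leave the
 * flags and CC_OP untouched, so the movcond sequences above select
 * between the new CC values and the previous ones; from the
 * translator's point of view CC_OP then becomes CC_OP_DYNAMIC.
 */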

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 is defined then fall through into the MO_32
         * case, otherwise fall through to the default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}
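/*
 * Worked example (illustrative): for a 16-bit SHRD with a masked count
 * of 20, the MO_16 case above first builds the 32-bit B:A value, and
 * the shift then pulls in bits of the reconstructed A:B:A pattern,
 * which is the Intel behaviour for counts above 16 described in the
 * function's comment.
 */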

#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            (void)translator_ldub(env, &s->base,
                                  (s->pc - 1) & TARGET_PAGE_MASK);
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif

/* Decompose an address.  */

typedef struct AddressParts {
    int def_seg;
    int base;
    int index;
    int scale;
    target_long disp;
} AddressParts;
1624 
1625 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1626                                     int modrm)
1627 {
1628     int def_seg, base, index, scale, mod, rm;
1629     target_long disp;
1630     bool havesib;
1631 
1632     def_seg = R_DS;
1633     index = -1;
1634     scale = 0;
1635     disp = 0;
1636 
1637     mod = (modrm >> 6) & 3;
1638     rm = modrm & 7;
1639     base = rm | REX_B(s);
1640 
1641     if (mod == 3) {
1642         /* Normally filtered out earlier, but including this path
1643            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
1644         goto done;
1645     }
1646 
1647     switch (s->aflag) {
1648     case MO_64:
1649     case MO_32:
1650         havesib = 0;
1651         if (rm == 4) {
1652             int code = x86_ldub_code(env, s);
1653             scale = (code >> 6) & 3;
1654             index = ((code >> 3) & 7) | REX_X(s);
1655             if (index == 4) {
1656                 index = -1;  /* no index */
1657             }
1658             base = (code & 7) | REX_B(s);
1659             havesib = 1;
1660         }
1661 
1662         switch (mod) {
1663         case 0:
1664             if ((base & 7) == 5) {
1665                 base = -1;
1666                 disp = (int32_t)x86_ldl_code(env, s);
1667                 if (CODE64(s) && !havesib) {
1668                     base = -2;
1669                     disp += s->pc + s->rip_offset;
1670                 }
1671             }
1672             break;
1673         case 1:
1674             disp = (int8_t)x86_ldub_code(env, s);
1675             break;
1676         default:
1677         case 2:
1678             disp = (int32_t)x86_ldl_code(env, s);
1679             break;
1680         }
1681 
1682         /* For correct popl handling with esp.  */
1683         if (base == R_ESP && s->popl_esp_hack) {
1684             disp += s->popl_esp_hack;
1685         }
1686         if (base == R_EBP || base == R_ESP) {
1687             def_seg = R_SS;
1688         }
1689         break;
1690 
1691     case MO_16:
1692         if (mod == 0) {
1693             if (rm == 6) {
1694                 base = -1;
1695                 disp = x86_lduw_code(env, s);
1696                 break;
1697             }
1698         } else if (mod == 1) {
1699             disp = (int8_t)x86_ldub_code(env, s);
1700         } else {
1701             disp = (int16_t)x86_lduw_code(env, s);
1702         }
1703 
1704         switch (rm) {
1705         case 0:
1706             base = R_EBX;
1707             index = R_ESI;
1708             break;
1709         case 1:
1710             base = R_EBX;
1711             index = R_EDI;
1712             break;
1713         case 2:
1714             base = R_EBP;
1715             index = R_ESI;
1716             def_seg = R_SS;
1717             break;
1718         case 3:
1719             base = R_EBP;
1720             index = R_EDI;
1721             def_seg = R_SS;
1722             break;
1723         case 4:
1724             base = R_ESI;
1725             break;
1726         case 5:
1727             base = R_EDI;
1728             break;
1729         case 6:
1730             base = R_EBP;
1731             def_seg = R_SS;
1732             break;
1733         default:
1734         case 7:
1735             base = R_EBX;
1736             break;
1737         }
1738         break;
1739 
1740     default:
1741         g_assert_not_reached();
1742     }
1743 
1744  done:
1745     return (AddressParts){ def_seg, base, index, scale, disp };
1746 }
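/*
 * Worked example (illustrative): in 32-bit addressing, modrm = 0x44
 * (mod=1, rm=4) is followed by a SIB byte; for SIB = 0x8d (scale=2,
 * index=ECX, base=EBP) and disp8 = 0x10 the decoder returns
 * { def_seg = R_SS, base = R_EBP, index = R_ECX, scale = 2, disp = 16 },
 * i.e. the effective address ss:[ebp + ecx*4 + 0x10].
 */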
1747 
1748 /* Compute the address, with a minimum number of TCG ops.  */
1749 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
1750 {
1751     TCGv ea = NULL;
1752 
1753     if (a.index >= 0 && !is_vsib) {
1754         if (a.scale == 0) {
1755             ea = cpu_regs[a.index];
1756         } else {
1757             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
1758             ea = s->A0;
1759         }
1760         if (a.base >= 0) {
1761             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
1762             ea = s->A0;
1763         }
1764     } else if (a.base >= 0) {
1765         ea = cpu_regs[a.base];
1766     }
1767     if (!ea) {
1768         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
1769             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1770             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
1771         } else {
1772             tcg_gen_movi_tl(s->A0, a.disp);
1773         }
1774         ea = s->A0;
1775     } else if (a.disp != 0) {
1776         tcg_gen_addi_tl(s->A0, ea, a.disp);
1777         ea = s->A0;
1778     }
1779 
1780     return ea;
1781 }
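/*
 * For the worked example above ({R_SS, R_EBP, R_ECX, 2, 16}) this
 * emits at most three ops into A0:
 *
 *     shli_tl  A0, ecx, 2
 *     add_tl   A0, A0, ebp
 *     addi_tl  A0, A0, 16
 *
 * while a bare register operand with no displacement emits nothing
 * and returns cpu_regs[base] directly.
 */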
1782 
1783 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1784 {
1785     AddressParts a = gen_lea_modrm_0(env, s, modrm);
1786     TCGv ea = gen_lea_modrm_1(s, a, false);
1787     gen_lea_v_seg(s, ea, a.def_seg, s->override);
1788 }
1789 
1790 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
1791 {
1792     (void)gen_lea_modrm_0(env, s, modrm);
1793 }
1794 
1795 /* Used for BNDCL, BNDCU, BNDCN.  */
1796 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
1797                       TCGCond cond, TCGv_i64 bndv)
1798 {
1799     AddressParts a = gen_lea_modrm_0(env, s, modrm);
1800     TCGv ea = gen_lea_modrm_1(s, a, false);
1801 
1802     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
1803     if (!CODE64(s)) {
1804         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
1805     }
1806     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
1807     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
1808     gen_helper_bndck(tcg_env, s->tmp2_i32);
1809 }
1810 
1811 /* Generate a modrm load from memory or register. */
1812 static void gen_ld_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
1813 {
1814     int mod, rm;
1815 
1816     mod = (modrm >> 6) & 3;
1817     rm = (modrm & 7) | REX_B(s);
1818     if (mod == 3) {
1819         gen_op_mov_v_reg(s, ot, s->T0, rm);
1820     } else {
1821         gen_lea_modrm(env, s, modrm);
1822         gen_op_ld_v(s, ot, s->T0, s->A0);
1823     }
1824 }
1825 
1826 /* Generate a modrm store to memory or register. */
1827 static void gen_st_modrm(CPUX86State *env, DisasContext *s, int modrm, MemOp ot)
1828 {
1829     int mod, rm;
1830 
1831     mod = (modrm >> 6) & 3;
1832     rm = (modrm & 7) | REX_B(s);
1833     if (mod == 3) {
1834         gen_op_mov_reg_v(s, ot, rm, s->T0);
1835     } else {
1836         gen_lea_modrm(env, s, modrm);
1837         gen_op_st_v(s, ot, s->T0, s->A0);
1838     }
1839 }
1840 
1841 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
1842 {
1843     target_ulong ret;
1844 
1845     switch (ot) {
1846     case MO_8:
1847         ret = x86_ldub_code(env, s);
1848         break;
1849     case MO_16:
1850         ret = x86_lduw_code(env, s);
1851         break;
1852     case MO_32:
1853         ret = x86_ldl_code(env, s);
1854         break;
1855 #ifdef TARGET_X86_64
1856     case MO_64:
1857         ret = x86_ldq_code(env, s);
1858         break;
1859 #endif
1860     default:
1861         g_assert_not_reached();
1862     }
1863     return ret;
1864 }
1865 
1866 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
1867 {
1868     uint32_t ret;
1869 
1870     switch (ot) {
1871     case MO_8:
1872         ret = x86_ldub_code(env, s);
1873         break;
1874     case MO_16:
1875         ret = x86_lduw_code(env, s);
1876         break;
1877     case MO_32:
1878 #ifdef TARGET_X86_64
1879     case MO_64:
1880 #endif
1881         ret = x86_ldl_code(env, s);
1882         break;
1883     default:
1884         g_assert_not_reached();
1885     }
1886     return ret;
1887 }
1888 
1889 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
1890 {
1891     target_long ret;
1892 
1893     switch (ot) {
1894     case MO_8:
1895         ret = (int8_t) x86_ldub_code(env, s);
1896         break;
1897     case MO_16:
1898         ret = (int16_t) x86_lduw_code(env, s);
1899         break;
1900     case MO_32:
1901         ret = (int32_t) x86_ldl_code(env, s);
1902         break;
1903 #ifdef TARGET_X86_64
1904     case MO_64:
1905         ret = x86_ldq_code(env, s);
1906         break;
1907 #endif
1908     default:
1909         g_assert_not_reached();
1910     }
1911     return ret;
1912 }
1913 
1914 static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
1915                                         TCGLabel *not_taken, TCGLabel *taken)
1916 {
1917     if (not_taken) {
1918         gen_set_label(not_taken);
1919     }
1920     gen_jmp_rel_csize(s, 0, 1);
1921 
1922     gen_set_label(taken);
1923     gen_jmp_rel(s, s->dflag, diff, 0);
1924 }
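/*
 * Note the label layout: the not-taken path falls through to a jump
 * to the next instruction (diff 0, code-size truncation), while the
 * taken label jumps to eip + diff truncated to the branch's operand
 * size.
 */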
1925 
1926 static void gen_jcc(DisasContext *s, int b, int diff)
1927 {
1928     TCGLabel *l1 = gen_new_label();
1929 
1930     gen_jcc1(s, b, l1);
1931     gen_conditional_jump_labels(s, diff, NULL, l1);
1932 }
1933 
1934 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
1935 {
1936     CCPrepare cc = gen_prepare_cc(s, b, NULL);
1937 
1938     if (!cc.use_reg2) {
1939         cc.reg2 = tcg_constant_tl(cc.imm);
1940     }
1941 
1942     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
1943 }
1944 
1945 static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
1946 {
1947     TCGv selector = tcg_temp_new();
1948     tcg_gen_ext16u_tl(selector, seg);
1949     tcg_gen_st32_tl(selector, tcg_env,
1950                     offsetof(CPUX86State,segs[seg_reg].selector));
1951     tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
1952 }
1953 
1954 /* Move SRC into seg_reg and determine whether the CPU state may change.
1955    Never call this function with seg_reg == R_CS. */
1956 static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
1957 {
1958     if (PE(s) && !VM86(s)) {
1959         tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
1960         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
1961         /* abort translation because the addseg value may change or
1962            because ss32 may change. For R_SS, translation must always
1963            stop as a special handling must be done to disable hardware
1964            interrupts for the next instruction */
1965         if (seg_reg == R_SS) {
1966             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1967         } else if (CODE32(s) && seg_reg < R_FS) {
1968             s->base.is_jmp = DISAS_EOB_NEXT;
1969         }
1970     } else {
1971         gen_op_movl_seg_real(s, seg_reg, src);
1972         if (seg_reg == R_SS) {
1973             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
1974         }
1975     }
1976 }
1977 
1978 static void gen_far_call(DisasContext *s)
1979 {
1980     TCGv_i32 new_cs = tcg_temp_new_i32();
1981     tcg_gen_trunc_tl_i32(new_cs, s->T1);
1982     if (PE(s) && !VM86(s)) {
1983         gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
1984                                    tcg_constant_i32(s->dflag - 1),
1985                                    eip_next_tl(s));
1986     } else {
1987         TCGv_i32 new_eip = tcg_temp_new_i32();
1988         tcg_gen_trunc_tl_i32(new_eip, s->T0);
1989         gen_helper_lcall_real(tcg_env, new_cs, new_eip,
1990                               tcg_constant_i32(s->dflag - 1),
1991                               eip_next_i32(s));
1992     }
1993     s->base.is_jmp = DISAS_JUMP;
1994 }
1995 
1996 static void gen_far_jmp(DisasContext *s)
1997 {
1998     if (PE(s) && !VM86(s)) {
1999         TCGv_i32 new_cs = tcg_temp_new_i32();
2000         tcg_gen_trunc_tl_i32(new_cs, s->T1);
2001         gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
2002                                   eip_next_tl(s));
2003     } else {
2004         gen_op_movl_seg_real(s, R_CS, s->T1);
2005         gen_op_jmp_v(s, s->T0);
2006     }
2007     s->base.is_jmp = DISAS_JUMP;
2008 }
2009 
2010 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2011 {
2012     /* no SVM activated; fast case */
2013     if (likely(!GUEST(s))) {
2014         return;
2015     }
2016     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2017 }
2018 
2019 static inline void gen_stack_update(DisasContext *s, int addend)
2020 {
2021     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2022 }
2023 
2024 static void gen_lea_ss_ofs(DisasContext *s, TCGv dest, TCGv src, target_ulong offset)
2025 {
2026     if (offset) {
2027         tcg_gen_addi_tl(dest, src, offset);
2028         src = dest;
2029     }
2030     gen_lea_v_seg_dest(s, mo_stacksize(s), dest, src, R_SS, -1);
2031 }
2032 
2033 /* Generate a push. It depends on ss32, addseg and dflag.  */
2034 static void gen_push_v(DisasContext *s, TCGv val)
2035 {
2036     MemOp d_ot = mo_pushpop(s, s->dflag);
2037     MemOp a_ot = mo_stacksize(s);
2038     int size = 1 << d_ot;
2039     TCGv new_esp = tcg_temp_new();
2040 
2041     tcg_gen_subi_tl(new_esp, cpu_regs[R_ESP], size);
2042 
2043     /* Now reduce the value to the address size and apply SS base.  */
2044     gen_lea_ss_ofs(s, s->A0, new_esp, 0);
2045     gen_op_st_v(s, d_ot, val, s->A0);
2046     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2047 }
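/*
 * Example (illustrative): a 32-bit "push %eax" computes
 * new_esp = ESP - 4, stores EAX at ss:new_esp, and only then writes
 * new_esp back to ESP, so a faulting store leaves ESP unmodified and
 * the instruction can be restarted.
 */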
2048 
2049 /* A two-step pop is necessary for precise exceptions. */
2050 static MemOp gen_pop_T0(DisasContext *s)
2051 {
2052     MemOp d_ot = mo_pushpop(s, s->dflag);
2053 
2054     gen_lea_ss_ofs(s, s->T0, cpu_regs[R_ESP], 0);
2055     gen_op_ld_v(s, d_ot, s->T0, s->T0);
2056 
2057     return d_ot;
2058 }
2059 
2060 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2061 {
2062     gen_stack_update(s, 1 << ot);
2063 }
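/*
 * The load in gen_pop_T0() and the ESP update here are deliberately
 * separate: if the load faults, ESP is still unmodified and the pop
 * restarts cleanly (the "two-step pop" noted above).
 */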
2064 
2065 static void gen_pusha(DisasContext *s)
2066 {
2067     MemOp d_ot = s->dflag;
2068     int size = 1 << d_ot;
2069     int i;
2070 
2071     for (i = 0; i < 8; i++) {
2072         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], (i - 8) * size);
2073         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2074     }
2075 
2076     gen_stack_update(s, -8 * size);
2077 }
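/*
 * Illustrative trace: i = 0 stores EDI at ss:[esp - 8*size] and i = 7
 * stores EAX at ss:[esp - size], giving the architectural PUSHA order
 * EAX, ECX, EDX, EBX, original ESP, EBP, ESI, EDI from the highest
 * address down.
 */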
2078 
2079 static void gen_popa(DisasContext *s)
2080 {
2081     MemOp d_ot = s->dflag;
2082     int size = 1 << d_ot;
2083     int i;
2084 
2085     for (i = 0; i < 8; i++) {
2086         /* ESP is not reloaded */
2087         if (7 - i == R_ESP) {
2088             continue;
2089         }
2090         gen_lea_ss_ofs(s, s->A0, cpu_regs[R_ESP], i * size);
2091         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2092         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2093     }
2094 
2095     gen_stack_update(s, 8 * size);
2096 }
2097 
2098 static void gen_enter(DisasContext *s, int esp_addend, int level)
2099 {
2100     MemOp d_ot = mo_pushpop(s, s->dflag);
2101     MemOp a_ot = mo_stacksize(s);
2102     int size = 1 << d_ot;
2103 
2104     /* Push BP; compute FrameTemp into T1.  */
2105     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2106     gen_lea_ss_ofs(s, s->A0, s->T1, 0);
2107     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2108 
2109     level &= 31;
2110     if (level != 0) {
2111         int i;
2112 
2113         /* Copy level-1 pointers from the previous frame.  */
2114         for (i = 1; i < level; ++i) {
2115             gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], -size * i);
2116             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2117 
2118             gen_lea_ss_ofs(s, s->A0, s->T1, -size * i);
2119             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2120         }
2121 
2122         /* Push the current FrameTemp as the last level.  */
2123         gen_lea_ss_ofs(s, s->A0, s->T1, -size * level);
2124         gen_op_st_v(s, d_ot, s->T1, s->A0);
2125     }
2126 
2127     /* Copy the FrameTemp value to EBP.  */
2128     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2129 
2130     /* Compute the final value of ESP.  */
2131     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2132     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2133 }
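/*
 * Sketch of "enter N, L" as generated above (illustrative): push EBP;
 * FrameTemp = ESP - size; for L > 0, copy the L-1 outer frame pointers
 * from the old frame and push FrameTemp itself as the innermost level;
 * finally EBP = FrameTemp and ESP = FrameTemp - N - size*L.
 */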
2134 
2135 static void gen_leave(DisasContext *s)
2136 {
2137     MemOp d_ot = mo_pushpop(s, s->dflag);
2138     MemOp a_ot = mo_stacksize(s);
2139 
2140     gen_lea_ss_ofs(s, s->A0, cpu_regs[R_EBP], 0);
2141     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2142 
2143     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2144 
2145     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2146     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2147 }
2148 
2149 /* Similarly, except that the assumption here is that we don't decode
2150    the instruction at all -- either a missing opcode, an unimplemented
2151    feature, or just a bogus instruction stream.  */
2152 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2153 {
2154     gen_illegal_opcode(s);
2155 
2156     if (qemu_loglevel_mask(LOG_UNIMP)) {
2157         FILE *logfile = qemu_log_trylock();
2158         if (logfile) {
2159             target_ulong pc = s->base.pc_next, end = s->pc;
2160 
2161             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2162             for (; pc < end; ++pc) {
2163                 fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
2164             }
2165             fprintf(logfile, "\n");
2166             qemu_log_unlock(logfile);
2167         }
2168     }
2169 }
2170 
2171 /* an interrupt is different from an exception because of the
2172    privilege checks */
2173 static void gen_interrupt(DisasContext *s, uint8_t intno)
2174 {
2175     gen_update_cc_op(s);
2176     gen_update_eip_cur(s);
2177     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2178                                cur_insn_len_i32(s));
2179     s->base.is_jmp = DISAS_NORETURN;
2180 }
2181 
2182 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2183 {
2184     if ((s->flags & mask) == 0) {
2185         TCGv_i32 t = tcg_temp_new_i32();
2186         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2187         tcg_gen_ori_i32(t, t, mask);
2188         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2189         s->flags |= mask;
2190     }
2191 }
2192 
2193 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2194 {
2195     if (s->flags & mask) {
2196         TCGv_i32 t = tcg_temp_new_i32();
2197         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2198         tcg_gen_andi_i32(t, t, ~mask);
2199         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2200         s->flags &= ~mask;
2201     }
2202 }
2203 
2204 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2205 {
2206     TCGv t = tcg_temp_new();
2207 
2208     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2209     tcg_gen_ori_tl(t, t, mask);
2210     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2211 }
2212 
2213 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2214 {
2215     TCGv t = tcg_temp_new();
2216 
2217     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2218     tcg_gen_andi_tl(t, t, ~mask);
2219     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2220 }
2221 
2222 /* Clear BND registers during legacy branches.  */
2223 static void gen_bnd_jmp(DisasContext *s)
2224 {
2225     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2226        and if the BNDREGs are known to be in use (non-zero) already.
2227        The helper itself will check BNDPRESERVE at runtime.  */
2228     if ((s->prefix & PREFIX_REPNZ) == 0
2229         && (s->flags & HF_MPX_EN_MASK) != 0
2230         && (s->flags & HF_MPX_IU_MASK) != 0) {
2231         gen_helper_bnd_jmp(tcg_env);
2232     }
2233 }
2234 
2235 /*
2236  * Generate an end of block, including common tasks such as generating
2237  * single step traps, resetting the RF flag, and handling the interrupt
2238  * shadow.
2239  */
2240 static void
2241 gen_eob(DisasContext *s, int mode)
2242 {
2243     bool inhibit_reset;
2244 
2245     gen_update_cc_op(s);
2246 
2247     /* If several instructions disable interrupts, only the first does it.  */
2248     inhibit_reset = false;
2249     if (s->flags & HF_INHIBIT_IRQ_MASK) {
2250         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2251         inhibit_reset = true;
2252     } else if (mode == DISAS_EOB_INHIBIT_IRQ) {
2253         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2254     }
2255 
2256     if (s->base.tb->flags & HF_RF_MASK) {
2257         gen_reset_eflags(s, RF_MASK);
2258     }
2259     if (mode == DISAS_EOB_RECHECK_TF) {
2260         gen_helper_rechecking_single_step(tcg_env);
2261         tcg_gen_exit_tb(NULL, 0);
2262     } else if ((s->flags & HF_TF_MASK) && mode != DISAS_EOB_INHIBIT_IRQ) {
2263         gen_helper_single_step(tcg_env);
2264     } else if (mode == DISAS_JUMP &&
2265                /* give irqs a chance to happen */
2266                !inhibit_reset) {
2267         tcg_gen_lookup_and_goto_ptr();
2268     } else {
2269         tcg_gen_exit_tb(NULL, 0);
2270     }
2271 
2272     s->base.is_jmp = DISAS_NORETURN;
2273 }
2274 
2275 /* Jump to eip+diff, truncating the result to OT. */
2276 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2277 {
2278     bool use_goto_tb = s->jmp_opt;
2279     target_ulong mask = -1;
2280     target_ulong new_pc = s->pc + diff;
2281     target_ulong new_eip = new_pc - s->cs_base;
2282 
2283     assert(!s->cc_op_dirty);
2284 
2285     /* In 64-bit mode, operand size is fixed at 64 bits. */
2286     if (!CODE64(s)) {
2287         if (ot == MO_16) {
2288             mask = 0xffff;
2289             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2290                 use_goto_tb = false;
2291             }
2292         } else {
2293             mask = 0xffffffff;
2294         }
2295     }
2296     new_eip &= mask;
2297 
2298     if (tb_cflags(s->base.tb) & CF_PCREL) {
2299         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2300         /*
2301          * If we can prove the branch does not leave the page and we have
2302          * no extra masking to apply (data16 branch in code32, see above),
2303          * then we have also proven that the addition does not wrap.
2304          */
2305         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2306             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2307             use_goto_tb = false;
2308         }
2309     } else if (!CODE64(s)) {
2310         new_pc = (uint32_t)(new_eip + s->cs_base);
2311     }
2312 
2313     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2314         /* jump to same page: we can use a direct jump */
2315         tcg_gen_goto_tb(tb_num);
2316         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2317             tcg_gen_movi_tl(cpu_eip, new_eip);
2318         }
2319         tcg_gen_exit_tb(s->base.tb, tb_num);
2320         s->base.is_jmp = DISAS_NORETURN;
2321     } else {
2322         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2323             tcg_gen_movi_tl(cpu_eip, new_eip);
2324         }
2325         if (s->jmp_opt) {
2326             gen_eob(s, DISAS_JUMP);   /* jump to another page */
2327         } else {
2328             gen_eob(s, DISAS_EOB_ONLY);  /* exit to main loop */
2329         }
2330     }
2331 }
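/*
 * Example (illustrative): a short intra-page "jmp" translated without
 * CF_PCREL takes the goto_tb path:
 *
 *     goto_tb  tb_num
 *     movi_tl  cpu_eip, new_eip
 *     exit_tb  tb, tb_num
 *
 * letting the two TBs be chained directly.  A data16 branch in 32-bit
 * code needs EIP masked to 16 bits, which is why it disables goto_tb
 * in the PC-relative case above.
 */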
2332 
2333 /* Jump to eip+diff, truncating to the current code size. */
2334 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2335 {
2336     /* CODE64 ignores the OT argument, so we need not consider it. */
2337     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2338 }
2339 
2340 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2341 {
2342     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2343     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2344 }
2345 
2346 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2347 {
2348     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2349     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2350 }
2351 
2352 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2353 {
2354     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2355                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2356     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2357     int mem_index = s->mem_index;
2358     TCGv_i128 t = tcg_temp_new_i128();
2359 
2360     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2361     tcg_gen_st_i128(t, tcg_env, offset);
2362 }
2363 
2364 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2365 {
2366     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2367                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2368     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2369     int mem_index = s->mem_index;
2370     TCGv_i128 t = tcg_temp_new_i128();
2371 
2372     tcg_gen_ld_i128(t, tcg_env, offset);
2373     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2374 }
2375 
2376 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2377 {
2378     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2379     int mem_index = s->mem_index;
2380     TCGv_i128 t0 = tcg_temp_new_i128();
2381     TCGv_i128 t1 = tcg_temp_new_i128();
2382 
2383     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2384     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2385     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2386 
2387     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2388     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2389 }
2390 
2391 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2392 {
2393     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2394     int mem_index = s->mem_index;
2395     TCGv_i128 t = tcg_temp_new_i128();
2396 
2397     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2398     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2399     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2400     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2401     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2402 }
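/*
 * Note: the 32-byte YMM accesses above are split into two 16-byte
 * halves, so at most 16-byte atomicity is provided, and only the
 * first half carries the 32-byte alignment check when ALIGN is set.
 */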
2403 
2404 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2405 {
2406     TCGv_i64 cmp, val, old;
2407     TCGv Z;
2408 
2409     gen_lea_modrm(env, s, modrm);
2410 
2411     cmp = tcg_temp_new_i64();
2412     val = tcg_temp_new_i64();
2413     old = tcg_temp_new_i64();
2414 
2415     /* Construct the comparison values from the register pair. */
2416     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2417     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2418 
2419     /* Only require atomic with LOCK; non-parallel handled in generator. */
2420     if (s->prefix & PREFIX_LOCK) {
2421         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2422     } else {
2423         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
2424                                       s->mem_index, MO_TEUQ);
2425     }
2426 
2427     /* Compute the required value of Z. */
2428     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
2429     Z = tcg_temp_new();
2430     tcg_gen_trunc_i64_tl(Z, cmp);
2431 
2432     /*
2433      * Extract the result values for the register pair.
2434      * For 32-bit, we may do this unconditionally, because on success (Z=1),
2435      * the old value matches the previous value in EDX:EAX.  For x86_64,
2436      * the store must be conditional, because we must leave the source
2437      * registers unchanged on success, and zero-extend the writeback
2438      * on failure (Z=0).
2439      */
2440     if (TARGET_LONG_BITS == 32) {
2441         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
2442     } else {
2443         TCGv zero = tcg_constant_tl(0);
2444 
2445         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
2446         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
2447                            s->T0, cpu_regs[R_EAX]);
2448         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
2449                            s->T1, cpu_regs[R_EDX]);
2450     }
2451 
2452     /* Update Z. */
2453     gen_compute_eflags(s);
2454     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
2455 }
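/*
 * Semantics recap (illustrative): CMPXCHG8B compares EDX:EAX with the
 * 64-bit memory operand; on a match it stores ECX:EBX and sets ZF, on
 * a mismatch it loads the old value into EDX:EAX and clears ZF --
 * exactly the setcond/movcond/deposit sequence generated above.
 */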
2456 
2457 #ifdef TARGET_X86_64
2458 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
2459 {
2460     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
2461     TCGv_i64 t0, t1;
2462     TCGv_i128 cmp, val;
2463 
2464     gen_lea_modrm(env, s, modrm);
2465 
2466     cmp = tcg_temp_new_i128();
2467     val = tcg_temp_new_i128();
2468     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2469     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2470 
2471     /* Only require atomic with LOCK; non-parallel handled in generator. */
2472     if (s->prefix & PREFIX_LOCK) {
2473         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
2474     } else {
2475         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
2476     }
2477 
2478     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
2479 
2480     /* Determine success after the fact. */
2481     t0 = tcg_temp_new_i64();
2482     t1 = tcg_temp_new_i64();
2483     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
2484     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
2485     tcg_gen_or_i64(t0, t0, t1);
2486 
2487     /* Update Z. */
2488     gen_compute_eflags(s);
2489     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
2490     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
2491 
2492     /*
2493      * Extract the result values for the register pair.  We may do this
2494      * unconditionally, because on success (Z=1), the old value matches
2495      * the previous value in RDX:RAX.
2496      */
2497     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
2498     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
2499 }
2500 #endif
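/*
 * Unlike CMPXCHG8B above, the 16-byte variant uses MO_ALIGN, so a
 * misaligned operand faults: architecturally CMPXCHG16B requires its
 * memory operand to be 16-byte aligned.
 */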
2501 
2502 static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
2503 {
2504     CPUX86State *env = cpu_env(cpu);
2505     bool update_fip = true;
2506     int modrm, mod, rm, op;
2507 
2508     if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
2509         /* if CR0.EM or CR0.TS is set, generate an FPU exception */
2510         /* XXX: what to do on an illegal op? */
2511         gen_exception(s, EXCP07_PREX);
2512         return true;
2513     }
2514     modrm = x86_ldub_code(env, s);
2515     mod = (modrm >> 6) & 3;
2516     rm = modrm & 7;
2517     op = ((b & 7) << 3) | ((modrm >> 3) & 7);
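    /*
     * op packs the low three opcode bits (D8..DF) with the modrm reg
     * field into a 0x00..0x3f group index: e.g. opcode 0xd9 with /5
     * yields op = (1 << 3) | 5 = 0x0d, the "grp d9/5" case below.
     */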
2518     if (mod != 3) {
2519         /* memory op */
2520         AddressParts a = gen_lea_modrm_0(env, s, modrm);
2521         TCGv ea = gen_lea_modrm_1(s, a, false);
2522         TCGv last_addr = tcg_temp_new();
2523         bool update_fdp = true;
2524 
2525         tcg_gen_mov_tl(last_addr, ea);
2526         gen_lea_v_seg(s, ea, a.def_seg, s->override);
2527 
2528         switch (op) {
2529         case 0x00 ... 0x07: /* fxxxs */
2530         case 0x10 ... 0x17: /* fixxxl */
2531         case 0x20 ... 0x27: /* fxxxl */
2532         case 0x30 ... 0x37: /* fixxx */
2533             {
2534                 int op1;
2535                 op1 = op & 7;
2536 
2537                 switch (op >> 4) {
2538                 case 0:
2539                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2540                                         s->mem_index, MO_LEUL);
2541                     gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
2542                     break;
2543                 case 1:
2544                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2545                                         s->mem_index, MO_LEUL);
2546                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2547                     break;
2548                 case 2:
2549                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2550                                         s->mem_index, MO_LEUQ);
2551                     gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
2552                     break;
2553                 case 3:
2554                 default:
2555                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2556                                         s->mem_index, MO_LESW);
2557                     gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
2558                     break;
2559                 }
2560 
2561                 gen_helper_fp_arith_ST0_FT0(op1);
2562                 if (op1 == 3) {
2563                     /* fcomp needs pop */
2564                     gen_helper_fpop(tcg_env);
2565                 }
2566             }
2567             break;
2568         case 0x08: /* flds */
2569         case 0x0a: /* fsts */
2570         case 0x0b: /* fstps */
2571         case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
2572         case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
2573         case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
2574             switch (op & 7) {
2575             case 0:
2576                 switch (op >> 4) {
2577                 case 0:
2578                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2579                                         s->mem_index, MO_LEUL);
2580                     gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
2581                     break;
2582                 case 1:
2583                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2584                                         s->mem_index, MO_LEUL);
2585                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2586                     break;
2587                 case 2:
2588                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2589                                         s->mem_index, MO_LEUQ);
2590                     gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
2591                     break;
2592                 case 3:
2593                 default:
2594                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2595                                         s->mem_index, MO_LESW);
2596                     gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
2597                     break;
2598                 }
2599                 break;
2600             case 1:
2601                 /* XXX: the corresponding CPUID bit must be tested! */
2602                 switch (op >> 4) {
2603                 case 1:
2604                     gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
2605                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2606                                         s->mem_index, MO_LEUL);
2607                     break;
2608                 case 2:
2609                     gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
2610                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2611                                         s->mem_index, MO_LEUQ);
2612                     break;
2613                 case 3:
2614                 default:
2615                     gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
2616                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2617                                         s->mem_index, MO_LEUW);
2618                     break;
2619                 }
2620                 gen_helper_fpop(tcg_env);
2621                 break;
2622             default:
2623                 switch (op >> 4) {
2624                 case 0:
2625                     gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
2626                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2627                                         s->mem_index, MO_LEUL);
2628                     break;
2629                 case 1:
2630                     gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
2631                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2632                                         s->mem_index, MO_LEUL);
2633                     break;
2634                 case 2:
2635                     gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
2636                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2637                                         s->mem_index, MO_LEUQ);
2638                     break;
2639                 case 3:
2640                 default:
2641                     gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
2642                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2643                                         s->mem_index, MO_LEUW);
2644                     break;
2645                 }
2646                 if ((op & 7) == 3) {
2647                     gen_helper_fpop(tcg_env);
2648                 }
2649                 break;
2650             }
2651             break;
2652         case 0x0c: /* fldenv mem */
2653             gen_helper_fldenv(tcg_env, s->A0,
2654                               tcg_constant_i32(s->dflag - 1));
2655             update_fip = update_fdp = false;
2656             break;
2657         case 0x0d: /* fldcw mem */
2658             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
2659                                 s->mem_index, MO_LEUW);
2660             gen_helper_fldcw(tcg_env, s->tmp2_i32);
2661             update_fip = update_fdp = false;
2662             break;
2663         case 0x0e: /* fnstenv mem */
2664             gen_helper_fstenv(tcg_env, s->A0,
2665                               tcg_constant_i32(s->dflag - 1));
2666             update_fip = update_fdp = false;
2667             break;
2668         case 0x0f: /* fnstcw mem */
2669             gen_helper_fnstcw(s->tmp2_i32, tcg_env);
2670             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2671                                 s->mem_index, MO_LEUW);
2672             update_fip = update_fdp = false;
2673             break;
2674         case 0x1d: /* fldt mem */
2675             gen_helper_fldt_ST0(tcg_env, s->A0);
2676             break;
2677         case 0x1f: /* fstpt mem */
2678             gen_helper_fstt_ST0(tcg_env, s->A0);
2679             gen_helper_fpop(tcg_env);
2680             break;
2681         case 0x2c: /* frstor mem */
2682             gen_helper_frstor(tcg_env, s->A0,
2683                               tcg_constant_i32(s->dflag - 1));
2684             update_fip = update_fdp = false;
2685             break;
2686         case 0x2e: /* fnsave mem */
2687             gen_helper_fsave(tcg_env, s->A0,
2688                              tcg_constant_i32(s->dflag - 1));
2689             update_fip = update_fdp = false;
2690             break;
2691         case 0x2f: /* fnstsw mem */
2692             gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2693             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
2694                                 s->mem_index, MO_LEUW);
2695             update_fip = update_fdp = false;
2696             break;
2697         case 0x3c: /* fbld */
2698             gen_helper_fbld_ST0(tcg_env, s->A0);
2699             break;
2700         case 0x3e: /* fbstp */
2701             gen_helper_fbst_ST0(tcg_env, s->A0);
2702             gen_helper_fpop(tcg_env);
2703             break;
2704         case 0x3d: /* fildll */
2705             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
2706                                 s->mem_index, MO_LEUQ);
2707             gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
2708             break;
2709         case 0x3f: /* fistpll */
2710             gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
2711             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
2712                                 s->mem_index, MO_LEUQ);
2713             gen_helper_fpop(tcg_env);
2714             break;
2715         default:
2716             return false;
2717         }
2718 
2719         if (update_fdp) {
2720             int last_seg = s->override >= 0 ? s->override : a.def_seg;
2721 
2722             tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
2723                            offsetof(CPUX86State,
2724                                     segs[last_seg].selector));
2725             tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
2726                              offsetof(CPUX86State, fpds));
2727             tcg_gen_st_tl(last_addr, tcg_env,
2728                           offsetof(CPUX86State, fpdp));
2729         }
2730     } else {
2731         /* register float ops */
2732         int opreg = rm;
2733 
2734         switch (op) {
2735         case 0x08: /* fld sti */
2736             gen_helper_fpush(tcg_env);
2737             gen_helper_fmov_ST0_STN(tcg_env,
2738                                     tcg_constant_i32((opreg + 1) & 7));
2739             break;
2740         case 0x09: /* fxchg sti */
2741         case 0x29: /* fxchg4 sti, undocumented op */
2742         case 0x39: /* fxchg7 sti, undocumented op */
2743             gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
2744             break;
2745         case 0x0a: /* grp d9/2 */
2746             switch (rm) {
2747             case 0: /* fnop */
2748                 /*
2749                  * check exceptions (FreeBSD FPU probe)
2750                  * needs to be treated as I/O because of ferr_irq
2751                  */
2752                 translator_io_start(&s->base);
2753                 gen_helper_fwait(tcg_env);
2754                 update_fip = false;
2755                 break;
2756             default:
2757                 return false;
2758             }
2759             break;
2760         case 0x0c: /* grp d9/4 */
2761             switch (rm) {
2762             case 0: /* fchs */
2763                 gen_helper_fchs_ST0(tcg_env);
2764                 break;
2765             case 1: /* fabs */
2766                 gen_helper_fabs_ST0(tcg_env);
2767                 break;
2768             case 4: /* ftst */
2769                 gen_helper_fldz_FT0(tcg_env);
2770                 gen_helper_fcom_ST0_FT0(tcg_env);
2771                 break;
2772             case 5: /* fxam */
2773                 gen_helper_fxam_ST0(tcg_env);
2774                 break;
2775             default:
2776                 return false;
2777             }
2778             break;
2779         case 0x0d: /* grp d9/5 */
2780             {
2781                 switch (rm) {
2782                 case 0:
2783                     gen_helper_fpush(tcg_env);
2784                     gen_helper_fld1_ST0(tcg_env);
2785                     break;
2786                 case 1:
2787                     gen_helper_fpush(tcg_env);
2788                     gen_helper_fldl2t_ST0(tcg_env);
2789                     break;
2790                 case 2:
2791                     gen_helper_fpush(tcg_env);
2792                     gen_helper_fldl2e_ST0(tcg_env);
2793                     break;
2794                 case 3:
2795                     gen_helper_fpush(tcg_env);
2796                     gen_helper_fldpi_ST0(tcg_env);
2797                     break;
2798                 case 4:
2799                     gen_helper_fpush(tcg_env);
2800                     gen_helper_fldlg2_ST0(tcg_env);
2801                     break;
2802                 case 5:
2803                     gen_helper_fpush(tcg_env);
2804                     gen_helper_fldln2_ST0(tcg_env);
2805                     break;
2806                 case 6:
2807                     gen_helper_fpush(tcg_env);
2808                     gen_helper_fldz_ST0(tcg_env);
2809                     break;
2810                 default:
2811                     return false;
2812                 }
2813             }
2814             break;
2815         case 0x0e: /* grp d9/6 */
2816             switch (rm) {
2817             case 0: /* f2xm1 */
2818                 gen_helper_f2xm1(tcg_env);
2819                 break;
2820             case 1: /* fyl2x */
2821                 gen_helper_fyl2x(tcg_env);
2822                 break;
2823             case 2: /* fptan */
2824                 gen_helper_fptan(tcg_env);
2825                 break;
2826             case 3: /* fpatan */
2827                 gen_helper_fpatan(tcg_env);
2828                 break;
2829             case 4: /* fxtract */
2830                 gen_helper_fxtract(tcg_env);
2831                 break;
2832             case 5: /* fprem1 */
2833                 gen_helper_fprem1(tcg_env);
2834                 break;
2835             case 6: /* fdecstp */
2836                 gen_helper_fdecstp(tcg_env);
2837                 break;
2838             default:
2839             case 7: /* fincstp */
2840                 gen_helper_fincstp(tcg_env);
2841                 break;
2842             }
2843             break;
2844         case 0x0f: /* grp d9/7 */
2845             switch (rm) {
2846             case 0: /* fprem */
2847                 gen_helper_fprem(tcg_env);
2848                 break;
2849             case 1: /* fyl2xp1 */
2850                 gen_helper_fyl2xp1(tcg_env);
2851                 break;
2852             case 2: /* fsqrt */
2853                 gen_helper_fsqrt(tcg_env);
2854                 break;
2855             case 3: /* fsincos */
2856                 gen_helper_fsincos(tcg_env);
2857                 break;
2858             case 5: /* fscale */
2859                 gen_helper_fscale(tcg_env);
2860                 break;
2861             case 4: /* frndint */
2862                 gen_helper_frndint(tcg_env);
2863                 break;
2864             case 6: /* fsin */
2865                 gen_helper_fsin(tcg_env);
2866                 break;
2867             default:
2868             case 7: /* fcos */
2869                 gen_helper_fcos(tcg_env);
2870                 break;
2871             }
2872             break;
2873         case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
2874         case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
2875         case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
2876             {
2877                 int op1;
2878 
2879                 op1 = op & 7;
2880                 if (op >= 0x20) {
2881                     gen_helper_fp_arith_STN_ST0(op1, opreg);
2882                     if (op >= 0x30) {
2883                         gen_helper_fpop(tcg_env);
2884                     }
2885                 } else {
2886                     gen_helper_fmov_FT0_STN(tcg_env,
2887                                             tcg_constant_i32(opreg));
2888                     gen_helper_fp_arith_ST0_FT0(op1);
2889                 }
2890             }
2891             break;
2892         case 0x02: /* fcom */
2893         case 0x22: /* fcom2, undocumented op */
2894             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2895             gen_helper_fcom_ST0_FT0(tcg_env);
2896             break;
2897         case 0x03: /* fcomp */
2898         case 0x23: /* fcomp3, undocumented op */
2899         case 0x32: /* fcomp5, undocumented op */
2900             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2901             gen_helper_fcom_ST0_FT0(tcg_env);
2902             gen_helper_fpop(tcg_env);
2903             break;
2904         case 0x15: /* da/5 */
2905             switch (rm) {
2906             case 1: /* fucompp */
2907                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2908                 gen_helper_fucom_ST0_FT0(tcg_env);
2909                 gen_helper_fpop(tcg_env);
2910                 gen_helper_fpop(tcg_env);
2911                 break;
2912             default:
2913                 return false;
2914             }
2915             break;
2916         case 0x1c:
2917             switch (rm) {
2918             case 0: /* feni (287 only, just do nop here) */
2919                 break;
2920             case 1: /* fdisi (287 only, just do nop here) */
2921                 break;
2922             case 2: /* fclex */
2923                 gen_helper_fclex(tcg_env);
2924                 update_fip = false;
2925                 break;
2926             case 3: /* fninit */
2927                 gen_helper_fninit(tcg_env);
2928                 update_fip = false;
2929                 break;
2930             case 4: /* fsetpm (287 only, just do nop here) */
2931                 break;
2932             default:
2933                 return false;
2934             }
2935             break;
2936         case 0x1d: /* fucomi */
2937             if (!(s->cpuid_features & CPUID_CMOV)) {
2938                 goto illegal_op;
2939             }
2940             gen_update_cc_op(s);
2941             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2942             gen_helper_fucomi_ST0_FT0(tcg_env);
2943             assume_cc_op(s, CC_OP_EFLAGS);
2944             break;
2945         case 0x1e: /* fcomi */
2946             if (!(s->cpuid_features & CPUID_CMOV)) {
2947                 goto illegal_op;
2948             }
2949             gen_update_cc_op(s);
2950             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2951             gen_helper_fcomi_ST0_FT0(tcg_env);
2952             assume_cc_op(s, CC_OP_EFLAGS);
2953             break;
2954         case 0x28: /* ffree sti */
2955             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2956             break;
2957         case 0x2a: /* fst sti */
2958             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2959             break;
2960         case 0x2b: /* fstp sti */
2961         case 0x0b: /* fstp1 sti, undocumented op */
2962         case 0x3a: /* fstp8 sti, undocumented op */
2963         case 0x3b: /* fstp9 sti, undocumented op */
2964             gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
2965             gen_helper_fpop(tcg_env);
2966             break;
2967         case 0x2c: /* fucom st(i) */
2968             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2969             gen_helper_fucom_ST0_FT0(tcg_env);
2970             break;
2971         case 0x2d: /* fucomp st(i) */
2972             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
2973             gen_helper_fucom_ST0_FT0(tcg_env);
2974             gen_helper_fpop(tcg_env);
2975             break;
2976         case 0x33: /* de/3 */
2977             switch (rm) {
2978             case 1: /* fcompp */
2979                 gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
2980                 gen_helper_fcom_ST0_FT0(tcg_env);
2981                 gen_helper_fpop(tcg_env);
2982                 gen_helper_fpop(tcg_env);
2983                 break;
2984             default:
2985                 return false;
2986             }
2987             break;
2988         case 0x38: /* ffreep sti, undocumented op */
2989             gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
2990             gen_helper_fpop(tcg_env);
2991             break;
2992         case 0x3c: /* df/4 */
2993             switch (rm) {
2994             case 0:
2995                 gen_helper_fnstsw(s->tmp2_i32, tcg_env);
2996                 tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
2997                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
2998                 break;
2999             default:
3000                 return false;
3001             }
3002             break;
3003         case 0x3d: /* fucomip */
3004             if (!(s->cpuid_features & CPUID_CMOV)) {
3005                 goto illegal_op;
3006             }
3007             gen_update_cc_op(s);
3008             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
3009             gen_helper_fucomi_ST0_FT0(tcg_env);
3010             gen_helper_fpop(tcg_env);
3011             assume_cc_op(s, CC_OP_EFLAGS);
3012             break;
3013         case 0x3e: /* fcomip */
3014             if (!(s->cpuid_features & CPUID_CMOV)) {
3015                 goto illegal_op;
3016             }
3017             gen_update_cc_op(s);
3018             gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
3019             gen_helper_fcomi_ST0_FT0(tcg_env);
3020             gen_helper_fpop(tcg_env);
3021             assume_cc_op(s, CC_OP_EFLAGS);
3022             break;
3023         case 0x10 ... 0x13: /* fcmovxx */
3024         case 0x18 ... 0x1b:
3025             {
3026                 int op1;
3027                 TCGLabel *l1;
3028                 static const uint8_t fcmov_cc[8] = {
3029                     (JCC_B << 1),
3030                     (JCC_Z << 1),
3031                     (JCC_BE << 1),
3032                     (JCC_P << 1),
3033                 };
3034 
3035                 if (!(s->cpuid_features & CPUID_CMOV)) {
3036                     goto illegal_op;
3037                 }
3038                 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
3039                 l1 = gen_new_label();
3040                 gen_jcc1_noeob(s, op1, l1);
3041                 gen_helper_fmov_ST0_STN(tcg_env,
3042                                         tcg_constant_i32(opreg));
3043                 gen_set_label(l1);
3044             }
3045             break;
3046         default:
3047             return false;
3048         }
3049     }
3050 
3051     if (update_fip) {
3052         tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
3053                        offsetof(CPUX86State, segs[R_CS].selector));
3054         tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
3055                          offsetof(CPUX86State, fpcs));
3056         tcg_gen_st_tl(eip_cur_tl(s),
3057                       tcg_env, offsetof(CPUX86State, fpip));
3058     }
3059     return true;
3060 
3061  illegal_op:
3062     gen_illegal_opcode(s);
3063     return true;
3064 }
3065 
3066 static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
3067 {
3068     CPUX86State *env = cpu_env(cpu);
3069     int prefixes = s->prefix;
3070     MemOp dflag = s->dflag;
3071     int shift;
3072     MemOp ot;
3073     int modrm, reg, rm, mod, op, opreg, val;
3074 
3075     /* now check op code */
3076     switch (b) {
3077         /**************************/
3078         /* arith & logic */
3079     case 0x1c0:
3080     case 0x1c1: /* xadd Ev, Gv */
3081         ot = mo_b_d(b, dflag);
3082         modrm = x86_ldub_code(env, s);
3083         reg = ((modrm >> 3) & 7) | REX_R(s);
3084         mod = (modrm >> 6) & 3;
3085         gen_op_mov_v_reg(s, ot, s->T0, reg);
3086         if (mod == 3) {
3087             rm = (modrm & 7) | REX_B(s);
3088             gen_op_mov_v_reg(s, ot, s->T1, rm);
3089             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3090             gen_op_mov_reg_v(s, ot, reg, s->T1);
3091             gen_op_mov_reg_v(s, ot, rm, s->T0);
3092         } else {
3093             gen_lea_modrm(env, s, modrm);
3094             if (s->prefix & PREFIX_LOCK) {
3095                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3096                                             s->mem_index, ot | MO_LE);
3097                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3098             } else {
3099                 gen_op_ld_v(s, ot, s->T1, s->A0);
3100                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3101                 gen_op_st_v(s, ot, s->T0, s->A0);
3102             }
3103             gen_op_mov_reg_v(s, ot, reg, s->T1);
3104         }
3105         gen_op_update2_cc(s);
3106         set_cc_op(s, CC_OP_ADDB + ot);
3107         break;
3108     case 0x1b0:
3109     case 0x1b1: /* cmpxchg Ev, Gv */
3110         {
3111             TCGv oldv, newv, cmpv, dest;
3112 
3113             ot = mo_b_d(b, dflag);
3114             modrm = x86_ldub_code(env, s);
3115             reg = ((modrm >> 3) & 7) | REX_R(s);
3116             mod = (modrm >> 6) & 3;
3117             oldv = tcg_temp_new();
3118             newv = tcg_temp_new();
3119             cmpv = tcg_temp_new();
3120             gen_op_mov_v_reg(s, ot, newv, reg);
3121             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3122             gen_extu(ot, cmpv);
3123             if (s->prefix & PREFIX_LOCK) {
3124                 if (mod == 3) {
3125                     goto illegal_op;
3126                 }
3127                 gen_lea_modrm(env, s, modrm);
3128                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3129                                           s->mem_index, ot | MO_LE);
3130             } else {
3131                 if (mod == 3) {
3132                     rm = (modrm & 7) | REX_B(s);
3133                     gen_op_mov_v_reg(s, ot, oldv, rm);
3134                     gen_extu(ot, oldv);
3135 
3136                     /*
3137                      * Unlike the memory case, where "the destination operand receives
3138                      * a write cycle without regard to the result of the comparison",
3139                      * rm must not be touched altogether if the write fails, including
3140                      * not zero-extending it on 64-bit processors.  So, precompute
3141                      * the result of a successful writeback and perform the movcond
3142                      * directly on cpu_regs.  Also need to write accumulator first, in
3143                      * case rm is part of RAX too.
3144                      */
3145                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3146                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3147                 } else {
3148                     gen_lea_modrm(env, s, modrm);
3149                     gen_op_ld_v(s, ot, oldv, s->A0);
3150 
3151                     /*
3152                      * Perform an unconditional store cycle like a physical CPU;
3153                      * it must happen before changing the accumulator, to ensure
3154                      * idempotency if the store faults and the instruction
3155                      * is restarted.
3156                      */
3157                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3158                     gen_op_st_v(s, ot, newv, s->A0);
3159                 }
3160             }
3161             /*
3162              * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3163              * since it's dead here.
3164              */
3165             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3166             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3167             tcg_gen_mov_tl(cpu_cc_src, oldv);
3168             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3169             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3170             set_cc_op(s, CC_OP_SUBB + ot);
3171         }
3172         break;
3173     case 0x1c7: /* cmpxchg8b */
3174         modrm = x86_ldub_code(env, s);
3175         mod = (modrm >> 6) & 3;
3176         switch ((modrm >> 3) & 7) {
3177         case 1: /* CMPXCHG8, CMPXCHG16 */
3178             if (mod == 3) {
3179                 goto illegal_op;
3180             }
3181 #ifdef TARGET_X86_64
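            /* With REX.W this is CMPXCHG16B, gated on the CX16 feature.  */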
3182             if (dflag == MO_64) {
3183                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3184                     goto illegal_op;
3185                 }
3186                 gen_cmpxchg16b(s, env, modrm);
3187                 break;
3188             }
3189 #endif
3190             if (!(s->cpuid_features & CPUID_CX8)) {
3191                 goto illegal_op;
3192             }
3193             gen_cmpxchg8b(s, env, modrm);
3194             break;
3195 
3196         case 7: /* RDSEED, RDPID with f3 prefix */
3197             if (mod != 3 ||
3198                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3199                 goto illegal_op;
3200             }
3201             if (s->prefix & PREFIX_REPZ) {
3202                 if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
3203                     goto illegal_op;
3204                 }
3205                 gen_helper_rdpid(s->T0, tcg_env);
3206                 rm = (modrm & 7) | REX_B(s);
3207                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3208                 break;
3209             } else {
3210                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3211                     goto illegal_op;
3212                 }
3213                 goto do_rdrand;
3214             }
3215 
3216         case 6: /* RDRAND */
3217             if (mod != 3 ||
3218                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3219                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3220                 goto illegal_op;
3221             }
3222         do_rdrand:
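            /* RDSEED (above) is implemented with the same helper as RDRAND.  */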
3223             translator_io_start(&s->base);
3224             gen_helper_rdrand(s->T0, tcg_env);
3225             rm = (modrm & 7) | REX_B(s);
3226             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3227             assume_cc_op(s, CC_OP_EFLAGS);
3228             break;
3229 
3230         default:
3231             goto illegal_op;
3232         }
3233         break;
3234 
3235         /**************************/
3236         /* shifts */
3237     case 0x1a4: /* shld imm */
3238         op = 0;
3239         shift = 1;
3240         goto do_shiftd;
3241     case 0x1a5: /* shld cl */
3242         op = 0;
3243         shift = 0;
3244         goto do_shiftd;
3245     case 0x1ac: /* shrd imm */
3246         op = 1;
3247         shift = 1;
3248         goto do_shiftd;
3249     case 0x1ad: /* shrd cl */
3250         op = 1;
3251         shift = 0;
3252     do_shiftd:
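        /* op 0 = shld, 1 = shrd; shift selects an immediate count vs. CL.  */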
3253         ot = dflag;
3254         modrm = x86_ldub_code(env, s);
3255         mod = (modrm >> 6) & 3;
3256         rm = (modrm & 7) | REX_B(s);
3257         reg = ((modrm >> 3) & 7) | REX_R(s);
3258         if (mod != 3) {
3259             gen_lea_modrm(env, s, modrm);
3260             opreg = OR_TMP0;
3261         } else {
3262             opreg = rm;
3263         }
3264         gen_op_mov_v_reg(s, ot, s->T1, reg);
3265 
3266         if (shift) {
3267             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
3268             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
3269         } else {
3270             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
3271         }
3272         break;
3273 
3274         /************************/
3275         /* bit operations */
3276     case 0x1ba: /* bt/bts/btr/btc Gv, im */
3277         ot = dflag;
3278         modrm = x86_ldub_code(env, s);
3279         op = (modrm >> 3) & 7;
3280         mod = (modrm >> 6) & 3;
3281         rm = (modrm & 7) | REX_B(s);
3282         if (mod != 3) {
3283             s->rip_offset = 1;
3284             gen_lea_modrm(env, s, modrm);
3285             if (!(s->prefix & PREFIX_LOCK)) {
3286                 gen_op_ld_v(s, ot, s->T0, s->A0);
3287             }
3288         } else {
3289             gen_op_mov_v_reg(s, ot, s->T0, rm);
3290         }
3291         /* load shift */
3292         val = x86_ldub_code(env, s);
3293         tcg_gen_movi_tl(s->T1, val);
3294         if (op < 4)
3295             goto unknown_op;
3296         op -= 4;
3297         goto bt_op;
3298     case 0x1a3: /* bt Gv, Ev */
3299         op = 0;
3300         goto do_btx;
3301     case 0x1ab: /* bts */
3302         op = 1;
3303         goto do_btx;
3304     case 0x1b3: /* btr */
3305         op = 2;
3306         goto do_btx;
3307     case 0x1bb: /* btc */
3308         op = 3;
3309     do_btx:
3310         ot = dflag;
3311         modrm = x86_ldub_code(env, s);
3312         reg = ((modrm >> 3) & 7) | REX_R(s);
3313         mod = (modrm >> 6) & 3;
3314         rm = (modrm & 7) | REX_B(s);
3315         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
3316         if (mod != 3) {
3317             AddressParts a = gen_lea_modrm_0(env, s, modrm);
3318             /* The bit offset may exceed the operand size; add the excess to the address.  */
3319             gen_exts(ot, s->T1);
3320             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
3321             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
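            /*
             * tmp0 = (bit index >> (3 + ot)) << ot, the byte displacement of
             * the word containing the bit.  E.g. a 32-bit bt of bit index 100
             * accesses the dword at mem + 12 and tests bit 100 % 32 = 4.
             */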
3322             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
3323             gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
3324             if (!(s->prefix & PREFIX_LOCK)) {
3325                 gen_op_ld_v(s, ot, s->T0, s->A0);
3326             }
3327         } else {
3328             gen_op_mov_v_reg(s, ot, s->T0, rm);
3329         }
3330     bt_op:
3331         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
3332         tcg_gen_movi_tl(s->tmp0, 1);
3333         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
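        /* Here T1 = bit offset modulo the operand size, tmp0 = 1 << T1.  */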
3334         if (s->prefix & PREFIX_LOCK) {
3335             switch (op) {
3336             case 0: /* bt */
3337                 /* Needs no atomic ops; we suppressed the normal
3338                    memory load for LOCK above, so do it now.  */
3339                 gen_op_ld_v(s, ot, s->T0, s->A0);
3340                 break;
3341             case 1: /* bts */
3342                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
3343                                            s->mem_index, ot | MO_LE);
3344                 break;
3345             case 2: /* btr */
3346                 tcg_gen_not_tl(s->tmp0, s->tmp0);
3347                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
3348                                             s->mem_index, ot | MO_LE);
3349                 break;
3350             default:
3351             case 3: /* btc */
3352                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
3353                                             s->mem_index, ot | MO_LE);
3354                 break;
3355             }
3356             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3357         } else {
3358             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
3359             switch (op) {
3360             case 0: /* bt */
3361                 /* Data already loaded; nothing to do.  */
3362                 break;
3363             case 1: /* bts */
3364                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
3365                 break;
3366             case 2: /* btr */
3367                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
3368                 break;
3369             default:
3370             case 3: /* btc */
3371                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
3372                 break;
3373             }
3374             if (op != 0) {
3375                 if (mod != 3) {
3376                     gen_op_st_v(s, ot, s->T0, s->A0);
3377                 } else {
3378                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3379                 }
3380             }
3381         }
3382 
3383         /* Delay all CC updates until after the store above.  Note that
3384            C is the result of the test, Z is unchanged, and the others
3385            are all undefined.  */
3386         switch (s->cc_op) {
3387         case CC_OP_MULB ... CC_OP_MULQ:
3388         case CC_OP_ADDB ... CC_OP_ADDQ:
3389         case CC_OP_ADCB ... CC_OP_ADCQ:
3390         case CC_OP_SUBB ... CC_OP_SUBQ:
3391         case CC_OP_SBBB ... CC_OP_SBBQ:
3392         case CC_OP_LOGICB ... CC_OP_LOGICQ:
3393         case CC_OP_INCB ... CC_OP_INCQ:
3394         case CC_OP_DECB ... CC_OP_DECQ:
3395         case CC_OP_SHLB ... CC_OP_SHLQ:
3396         case CC_OP_SARB ... CC_OP_SARQ:
3397         case CC_OP_BMILGB ... CC_OP_BMILGQ:
3398             /* Z was going to be computed from the non-zero status of CC_DST.
3399                We can get that same Z value (and the new C value) by leaving
3400                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
3401                same width.  */
3402             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
3403             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
3404             break;
3405         default:
3406             /* Otherwise, generate EFLAGS and replace the C bit.  */
3407             gen_compute_eflags(s);
3408             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
3409                                ctz32(CC_C), 1);
3410             break;
3411         }
3412         break;
3413     case 0x1bc: /* bsf / tzcnt */
3414     case 0x1bd: /* bsr / lzcnt */
3415         ot = dflag;
3416         modrm = x86_ldub_code(env, s);
3417         reg = ((modrm >> 3) & 7) | REX_R(s);
3418         gen_ld_modrm(env, s, modrm, ot);
3419         gen_extu(ot, s->T0);
3420 
3421         /* Note that lzcnt and tzcnt are in different extensions.  */
3422         if ((prefixes & PREFIX_REPZ)
3423             && (b & 1
3424                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
3425                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
3426             int size = 8 << ot;
3427             /* For lzcnt/tzcnt, the C bit is defined in terms of the input. */
3428             tcg_gen_mov_tl(cpu_cc_src, s->T0);
3429             if (b & 1) {
3430                 /* For lzcnt, reduce the target_ulong result by the
3431                    number of zeros that we expect to find at the top.  */
3432                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
3433                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
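                /*
                 * E.g. a 16-bit lzcnt of 0x0001 on a 64-bit target: clz = 63,
                 * and 63 - (64 - 16) = 15.  A zero input yields
                 * TARGET_LONG_BITS and thus the operand size, as architected.
                 */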
3434             } else {
3435                 /* For tzcnt, a zero input must return the operand size.  */
3436                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
3437             }
3438             /* For lzcnt/tzcnt, the Z bit is defined in terms of the result.  */
3439             gen_op_update1_cc(s);
3440             set_cc_op(s, CC_OP_BMILGB + ot);
3441         } else {
3442             /* For bsr/bsf, only the Z bit is defined, and it reflects
3443                the input rather than the result.  */
3444             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3445             set_cc_op(s, CC_OP_LOGICB + ot);
3446 
3447             /* ??? The manual says that the output is undefined when the
3448                input is zero, but real hardware leaves it unchanged, and
3449                real programs appear to depend on that.  Accomplish this
3450                by passing the output as the value to return upon zero.  */
3451             if (b & 1) {
3452                 /* For bsr, return the bit index of the first 1 bit,
3453                    not the count of leading zeros.  */
3454                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
3455                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
3456                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
3457             } else {
3458                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
3459             }
3460         }
3461         gen_op_mov_reg_v(s, ot, reg, s->T0);
3462         break;
3463     case 0x130: /* wrmsr */
3464     case 0x132: /* rdmsr */
3465         if (check_cpl0(s)) {
3466             gen_update_cc_op(s);
3467             gen_update_eip_cur(s);
3468             if (b & 2) {
3469                 gen_helper_rdmsr(tcg_env);
3470             } else {
3471                 gen_helper_wrmsr(tcg_env);
3472                 s->base.is_jmp = DISAS_EOB_NEXT;
3473             }
3474         }
3475         break;
3476     case 0x131: /* rdtsc */
3477         gen_update_cc_op(s);
3478         gen_update_eip_cur(s);
3479         translator_io_start(&s->base);
3480         gen_helper_rdtsc(tcg_env);
3481         break;
3482     case 0x133: /* rdpmc */
3483         gen_update_cc_op(s);
3484         gen_update_eip_cur(s);
3485         gen_helper_rdpmc(tcg_env);
3486         s->base.is_jmp = DISAS_NORETURN;
3487         break;
3488     case 0x134: /* sysenter */
3489         /* On AMD processors, SYSENTER is not valid in long mode */
3490         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
3491             goto illegal_op;
3492         }
3493         if (!PE(s)) {
3494             gen_exception_gpf(s);
3495         } else {
3496             gen_helper_sysenter(tcg_env);
3497             s->base.is_jmp = DISAS_EOB_ONLY;
3498         }
3499         break;
3500     case 0x135: /* sysexit */
3501         /* For AMD SYSEXIT is not valid in long mode */
3502         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
3503             goto illegal_op;
3504         }
3505         if (!PE(s) || CPL(s) != 0) {
3506             gen_exception_gpf(s);
3507         } else {
3508             gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
3509             s->base.is_jmp = DISAS_EOB_ONLY;
3510         }
3511         break;
3512     case 0x105: /* syscall */
3513         /* On Intel processors, SYSCALL is only valid in long mode */
3514         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
3515             goto illegal_op;
3516         }
3517         gen_update_cc_op(s);
3518         gen_update_eip_cur(s);
3519         gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
3520         /* condition codes are modified only in long mode */
3521         if (LMA(s)) {
3522             assume_cc_op(s, CC_OP_EFLAGS);
3523         }
3524         /* TF handling for the syscall insn is different: the TF bit is checked
3525            after the syscall insn completes, so that no #DB is raised after
3526            CPL0 has been entered if TF is set in FMASK.  */
3527         s->base.is_jmp = DISAS_EOB_RECHECK_TF;
3528         break;
3529     case 0x107: /* sysret */
3530         /* On Intel processors, SYSRET is only valid in long mode */
3531         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
3532             goto illegal_op;
3533         }
3534         if (!PE(s) || CPL(s) != 0) {
3535             gen_exception_gpf(s);
3536         } else {
3537             gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
3538             /* condition codes are modified only in long mode */
3539             if (LMA(s)) {
3540                 assume_cc_op(s, CC_OP_EFLAGS);
3541             }
3542             /* TF handling for the sysret insn is different. The TF bit is
3543                checked after the sysret insn completes. This allows #DB to be
3544                generated "as if" the syscall insn in userspace had just
3545                completed.  */
3546             s->base.is_jmp = DISAS_EOB_RECHECK_TF;
3547         }
3548         break;
3549     case 0x1a2: /* cpuid */
3550         gen_update_cc_op(s);
3551         gen_update_eip_cur(s);
3552         gen_helper_cpuid(tcg_env);
3553         break;
3554     case 0x100:
3555         modrm = x86_ldub_code(env, s);
3556         mod = (modrm >> 6) & 3;
3557         op = (modrm >> 3) & 7;
3558         switch(op) {
3559         case 0: /* sldt */
3560             if (!PE(s) || VM86(s))
3561                 goto illegal_op;
3562             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3563                 break;
3564             }
3565             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
3566             tcg_gen_ld32u_tl(s->T0, tcg_env,
3567                              offsetof(CPUX86State, ldt.selector));
3568             ot = mod == 3 ? dflag : MO_16;
3569             gen_st_modrm(env, s, modrm, ot);
3570             break;
3571         case 2: /* lldt */
3572             if (!PE(s) || VM86(s))
3573                 goto illegal_op;
3574             if (check_cpl0(s)) {
3575                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
3576                 gen_ld_modrm(env, s, modrm, MO_16);
3577                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3578                 gen_helper_lldt(tcg_env, s->tmp2_i32);
3579             }
3580             break;
3581         case 1: /* str */
3582             if (!PE(s) || VM86(s))
3583                 goto illegal_op;
3584             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3585                 break;
3586             }
3587             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
3588             tcg_gen_ld32u_tl(s->T0, tcg_env,
3589                              offsetof(CPUX86State, tr.selector));
3590             ot = mod == 3 ? dflag : MO_16;
3591             gen_st_modrm(env, s, modrm, ot);
3592             break;
3593         case 3: /* ltr */
3594             if (!PE(s) || VM86(s))
3595                 goto illegal_op;
3596             if (check_cpl0(s)) {
3597                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
3598                 gen_ld_modrm(env, s, modrm, MO_16);
3599                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3600                 gen_helper_ltr(tcg_env, s->tmp2_i32);
3601             }
3602             break;
3603         case 4: /* verr */
3604         case 5: /* verw */
3605             if (!PE(s) || VM86(s))
3606                 goto illegal_op;
3607             gen_ld_modrm(env, s, modrm, MO_16);
3608             gen_update_cc_op(s);
3609             if (op == 4) {
3610                 gen_helper_verr(tcg_env, s->T0);
3611             } else {
3612                 gen_helper_verw(tcg_env, s->T0);
3613             }
3614             assume_cc_op(s, CC_OP_EFLAGS);
3615             break;
3616         default:
3617             goto unknown_op;
3618         }
3619         break;
3620 
3621     case 0x101:
3622         modrm = x86_ldub_code(env, s);
3623         switch (modrm) {
3624         CASE_MODRM_MEM_OP(0): /* sgdt */
3625             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3626                 break;
3627             }
3628             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
3629             gen_lea_modrm(env, s, modrm);
3630             tcg_gen_ld32u_tl(s->T0,
3631                              tcg_env, offsetof(CPUX86State, gdt.limit));
3632             gen_op_st_v(s, MO_16, s->T0, s->A0);
3633             gen_add_A0_im(s, 2);
3634             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3635             /*
3636              * NB: Despite a confusing description in Intel CPU documentation,
3637              *     all 32 bits are written regardless of operand size.
3638              */
3639             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3640             break;
3641 
3642         case 0xc8: /* monitor */
3643             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3644                 goto illegal_op;
3645             }
3646             gen_update_cc_op(s);
3647             gen_update_eip_cur(s);
3648             gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
3649             gen_helper_monitor(tcg_env, s->A0);
3650             break;
3651 
3652         case 0xc9: /* mwait */
3653             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
3654                 goto illegal_op;
3655             }
3656             gen_update_cc_op(s);
3657             gen_update_eip_cur(s);
3658             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
3659             s->base.is_jmp = DISAS_NORETURN;
3660             break;
3661 
3662         case 0xca: /* clac */
3663             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3664                 || CPL(s) != 0) {
3665                 goto illegal_op;
3666             }
3667             gen_reset_eflags(s, AC_MASK);
3668             s->base.is_jmp = DISAS_EOB_NEXT;
3669             break;
3670 
3671         case 0xcb: /* stac */
3672             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
3673                 || CPL(s) != 0) {
3674                 goto illegal_op;
3675             }
3676             gen_set_eflags(s, AC_MASK);
3677             s->base.is_jmp = DISAS_EOB_NEXT;
3678             break;
3679 
3680         CASE_MODRM_MEM_OP(1): /* sidt */
3681             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3682                 break;
3683             }
3684             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
3685             gen_lea_modrm(env, s, modrm);
3686             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
3687             gen_op_st_v(s, MO_16, s->T0, s->A0);
3688             gen_add_A0_im(s, 2);
3689             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3690             /*
3691              * NB: Despite a confusing description in Intel CPU documentation,
3692              *     all 32 bits are written regardless of operand size.
3693              */
3694             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3695             break;
3696 
3697         case 0xd0: /* xgetbv */
3698             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3699                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3700                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
3701                 goto illegal_op;
3702             }
3703             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3704             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
3705             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3706             break;
3707 
3708         case 0xd1: /* xsetbv */
3709             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
3710                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3711                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
3712                 goto illegal_op;
3713             }
3714             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
3715             if (!check_cpl0(s)) {
3716                 break;
3717             }
3718             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3719                                   cpu_regs[R_EDX]);
3720             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3721             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
3722             /* End TB because translation flags may change.  */
3723             s->base.is_jmp = DISAS_EOB_NEXT;
3724             break;
3725 
3726         case 0xd8: /* VMRUN */
3727             if (!SVME(s) || !PE(s)) {
3728                 goto illegal_op;
3729             }
3730             if (!check_cpl0(s)) {
3731                 break;
3732             }
3733             gen_update_cc_op(s);
3734             gen_update_eip_cur(s);
3735             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
3736                              cur_insn_len_i32(s));
3737             tcg_gen_exit_tb(NULL, 0);
3738             s->base.is_jmp = DISAS_NORETURN;
3739             break;
3740 
3741         case 0xd9: /* VMMCALL */
3742             if (!SVME(s)) {
3743                 goto illegal_op;
3744             }
3745             gen_update_cc_op(s);
3746             gen_update_eip_cur(s);
3747             gen_helper_vmmcall(tcg_env);
3748             break;
3749 
3750         case 0xda: /* VMLOAD */
3751             if (!SVME(s) || !PE(s)) {
3752                 goto illegal_op;
3753             }
3754             if (!check_cpl0(s)) {
3755                 break;
3756             }
3757             gen_update_cc_op(s);
3758             gen_update_eip_cur(s);
3759             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
3760             break;
3761 
3762         case 0xdb: /* VMSAVE */
3763             if (!SVME(s) || !PE(s)) {
3764                 goto illegal_op;
3765             }
3766             if (!check_cpl0(s)) {
3767                 break;
3768             }
3769             gen_update_cc_op(s);
3770             gen_update_eip_cur(s);
3771             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
3772             break;
3773 
3774         case 0xdc: /* STGI */
3775             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3776                 || !PE(s)) {
3777                 goto illegal_op;
3778             }
3779             if (!check_cpl0(s)) {
3780                 break;
3781             }
3782             gen_update_cc_op(s);
3783             gen_helper_stgi(tcg_env);
3784             s->base.is_jmp = DISAS_EOB_NEXT;
3785             break;
3786 
3787         case 0xdd: /* CLGI */
3788             if (!SVME(s) || !PE(s)) {
3789                 goto illegal_op;
3790             }
3791             if (!check_cpl0(s)) {
3792                 break;
3793             }
3794             gen_update_cc_op(s);
3795             gen_update_eip_cur(s);
3796             gen_helper_clgi(tcg_env);
3797             break;
3798 
3799         case 0xde: /* SKINIT */
3800             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
3801                 || !PE(s)) {
3802                 goto illegal_op;
3803             }
3804             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
3805             /* If not intercepted, not implemented -- raise #UD. */
3806             goto illegal_op;
3807 
3808         case 0xdf: /* INVLPGA */
3809             if (!SVME(s) || !PE(s)) {
3810                 goto illegal_op;
3811             }
3812             if (!check_cpl0(s)) {
3813                 break;
3814             }
3815             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
3816             if (s->aflag == MO_64) {
3817                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
3818             } else {
3819                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
3820             }
3821             gen_helper_flush_page(tcg_env, s->A0);
3822             s->base.is_jmp = DISAS_EOB_NEXT;
3823             break;
3824 
3825         CASE_MODRM_MEM_OP(2): /* lgdt */
3826             if (!check_cpl0(s)) {
3827                 break;
3828             }
3829             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
3830             gen_lea_modrm(env, s, modrm);
3831             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3832             gen_add_A0_im(s, 2);
3833             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3834             if (dflag == MO_16) {
3835                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3836             }
3837             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
3838             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
3839             break;
3840 
3841         CASE_MODRM_MEM_OP(3): /* lidt */
3842             if (!check_cpl0(s)) {
3843                 break;
3844             }
3845             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
3846             gen_lea_modrm(env, s, modrm);
3847             gen_op_ld_v(s, MO_16, s->T1, s->A0);
3848             gen_add_A0_im(s, 2);
3849             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
3850             if (dflag == MO_16) {
3851                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
3852             }
3853             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
3854             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
3855             break;
3856 
3857         CASE_MODRM_OP(4): /* smsw */
3858             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
3859                 break;
3860             }
3861             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
3862             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
3863             /*
3864              * In 32-bit mode, the upper 16 bits of the destination
3865              * register are undefined.  In practice CR0[31:0] is stored
3866              * just like in 64-bit mode.
3867              */
3868             mod = (modrm >> 6) & 3;
3869             ot = (mod != 3 ? MO_16 : s->dflag);
3870             gen_st_modrm(env, s, modrm, ot);
3871             break;
3872         case 0xee: /* rdpkru */
3873             if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3874                              | PREFIX_REPZ | PREFIX_REPNZ)) {
3875                 goto illegal_op;
3876             }
3877             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3878             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
3879             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
3880             break;
3881         case 0xef: /* wrpkru */
3882             if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
3883                              | PREFIX_REPZ | PREFIX_REPNZ)) {
3884                 goto illegal_op;
3885             }
3886             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
3887                                   cpu_regs[R_EDX]);
3888             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
3889             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
3890             break;
3891 
3892         CASE_MODRM_OP(6): /* lmsw */
3893             if (!check_cpl0(s)) {
3894                 break;
3895             }
3896             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
3897             gen_ld_modrm(env, s, modrm, MO_16);
3898             /*
3899              * Only the 4 lower bits of CR0 are modified.
3900              * PE cannot be set to zero if already set to one.
3901              */
3902             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
3903             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
3904             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
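            /*
             * T0 = new PE/MP/EM/TS; T1 = old CR0 with MP/EM/TS cleared but PE
             * preserved, so the OR below keeps PE set once it has been set.
             */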
3905             tcg_gen_or_tl(s->T0, s->T0, s->T1);
3906             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
3907             s->base.is_jmp = DISAS_EOB_NEXT;
3908             break;
3909 
3910         CASE_MODRM_MEM_OP(7): /* invlpg */
3911             if (!check_cpl0(s)) {
3912                 break;
3913             }
3914             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
3915             gen_lea_modrm(env, s, modrm);
3916             gen_helper_flush_page(tcg_env, s->A0);
3917             s->base.is_jmp = DISAS_EOB_NEXT;
3918             break;
3919 
3920         case 0xf8: /* swapgs */
3921 #ifdef TARGET_X86_64
3922             if (CODE64(s)) {
3923                 if (check_cpl0(s)) {
3924                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
3925                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
3926                                   offsetof(CPUX86State, kernelgsbase));
3927                     tcg_gen_st_tl(s->T0, tcg_env,
3928                                   offsetof(CPUX86State, kernelgsbase));
3929                 }
3930                 break;
3931             }
3932 #endif
3933             goto illegal_op;
3934 
3935         case 0xf9: /* rdtscp */
3936             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
3937                 goto illegal_op;
3938             }
3939             gen_update_cc_op(s);
3940             gen_update_eip_cur(s);
3941             translator_io_start(&s->base);
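            /* RDTSCP is RDTSC plus the TSC_AUX value (read via the rdpid
               helper) placed in ECX.  */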
3942             gen_helper_rdtsc(tcg_env);
3943             gen_helper_rdpid(s->T0, tcg_env);
3944             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
3945             break;
3946 
3947         default:
3948             goto unknown_op;
3949         }
3950         break;
3951 
3952     case 0x108: /* invd */
3953     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
3954         if (check_cpl0(s)) {
3955             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
3956             /* nothing to do */
3957         }
3958         break;
3959     case 0x102: /* lar */
3960     case 0x103: /* lsl */
3961         {
3962             TCGLabel *label1;
3963             TCGv t0;
3964             if (!PE(s) || VM86(s))
3965                 goto illegal_op;
3966             ot = dflag != MO_16 ? MO_32 : MO_16;
3967             modrm = x86_ldub_code(env, s);
3968             reg = ((modrm >> 3) & 7) | REX_R(s);
3969             gen_ld_modrm(env, s, modrm, MO_16);
3970             t0 = tcg_temp_new();
3971             gen_update_cc_op(s);
3972             if (b == 0x102) {
3973                 gen_helper_lar(t0, tcg_env, s->T0);
3974             } else {
3975                 gen_helper_lsl(t0, tcg_env, s->T0);
3976             }
3977             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
3978             label1 = gen_new_label();
3979             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
3980             gen_op_mov_reg_v(s, ot, reg, t0);
3981             gen_set_label(label1);
3982             set_cc_op(s, CC_OP_EFLAGS);
3983         }
3984         break;
3985     case 0x11a:
3986         modrm = x86_ldub_code(env, s);
3987         if (s->flags & HF_MPX_EN_MASK) {
3988             mod = (modrm >> 6) & 3;
3989             reg = ((modrm >> 3) & 7) | REX_R(s);
3990             if (prefixes & PREFIX_REPZ) {
3991                 /* bndcl */
3992                 if (reg >= 4
3993                     || (prefixes & PREFIX_LOCK)
3994                     || s->aflag == MO_16) {
3995                     goto illegal_op;
3996                 }
3997                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
3998             } else if (prefixes & PREFIX_REPNZ) {
3999                 /* bndcu */
4000                 if (reg >= 4
4001                     || (prefixes & PREFIX_LOCK)
4002                     || s->aflag == MO_16) {
4003                     goto illegal_op;
4004                 }
4005                 TCGv_i64 notu = tcg_temp_new_i64();
4006                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
4007                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
4008             } else if (prefixes & PREFIX_DATA) {
4009                 /* bndmov -- from reg/mem */
4010                 if (reg >= 4 || s->aflag == MO_16) {
4011                     goto illegal_op;
4012                 }
4013                 if (mod == 3) {
4014                     int reg2 = (modrm & 7) | REX_B(s);
4015                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
4016                         goto illegal_op;
4017                     }
4018                     if (s->flags & HF_MPX_IU_MASK) {
4019                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
4020                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
4021                     }
4022                 } else {
4023                     gen_lea_modrm(env, s, modrm);
4024                     if (CODE64(s)) {
4025                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
4026                                             s->mem_index, MO_LEUQ);
4027                         tcg_gen_addi_tl(s->A0, s->A0, 8);
4028                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
4029                                             s->mem_index, MO_LEUQ);
4030                     } else {
4031                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
4032                                             s->mem_index, MO_LEUL);
4033                         tcg_gen_addi_tl(s->A0, s->A0, 4);
4034                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
4035                                             s->mem_index, MO_LEUL);
4036                     }
4037                     /* bnd registers are now in use */
4038                     gen_set_hflag(s, HF_MPX_IU_MASK);
4039                 }
4040             } else if (mod != 3) {
4041                 /* bndldx */
4042                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
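                /* a.base == -2 marks a RIP-relative address, which is
                   rejected below (a.base < -1), matching bndmk's #UD.  */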
4043                 if (reg >= 4
4044                     || (prefixes & PREFIX_LOCK)
4045                     || s->aflag == MO_16
4046                     || a.base < -1) {
4047                     goto illegal_op;
4048                 }
4049                 if (a.base >= 0) {
4050                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
4051                 } else {
4052                     tcg_gen_movi_tl(s->A0, 0);
4053                 }
4054                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
4055                 if (a.index >= 0) {
4056                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
4057                 } else {
4058                     tcg_gen_movi_tl(s->T0, 0);
4059                 }
4060                 if (CODE64(s)) {
4061                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
4062                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
4063                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
4064                 } else {
4065                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
4066                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
4067                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
4068                 }
4069                 gen_set_hflag(s, HF_MPX_IU_MASK);
4070             }
4071         }
4072         gen_nop_modrm(env, s, modrm);
4073         break;
4074     case 0x11b:
4075         modrm = x86_ldub_code(env, s);
4076         if (s->flags & HF_MPX_EN_MASK) {
4077             mod = (modrm >> 6) & 3;
4078             reg = ((modrm >> 3) & 7) | REX_R(s);
4079             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
4080                 /* bndmk */
4081                 if (reg >= 4
4082                     || (prefixes & PREFIX_LOCK)
4083                     || s->aflag == MO_16) {
4084                     goto illegal_op;
4085                 }
4086                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4087                 if (a.base >= 0) {
4088                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
4089                     if (!CODE64(s)) {
4090                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
4091                     }
4092                 } else if (a.base == -1) {
4093                     /* no base register: the lower bound is 0 */
4094                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
4095                 } else {
4096                     /* rip-relative generates #ud */
4097                     goto illegal_op;
4098                 }
4099                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
4100                 if (!CODE64(s)) {
4101                     tcg_gen_ext32u_tl(s->A0, s->A0);
4102                 }
4103                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
4104                 /* bnd registers are now in-use */
4105                 gen_set_hflag(s, HF_MPX_IU_MASK);
4106                 break;
4107             } else if (prefixes & PREFIX_REPNZ) {
4108                 /* bndcn */
4109                 if (reg >= 4
4110                     || (prefixes & PREFIX_LOCK)
4111                     || s->aflag == MO_16) {
4112                     goto illegal_op;
4113                 }
4114                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
4115             } else if (prefixes & PREFIX_DATA) {
4116                 /* bndmov -- to reg/mem */
4117                 if (reg >= 4 || s->aflag == MO_16) {
4118                     goto illegal_op;
4119                 }
4120                 if (mod == 3) {
4121                     int reg2 = (modrm & 7) | REX_B(s);
4122                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
4123                         goto illegal_op;
4124                     }
4125                     if (s->flags & HF_MPX_IU_MASK) {
4126                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
4127                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
4128                     }
4129                 } else {
4130                     gen_lea_modrm(env, s, modrm);
4131                     if (CODE64(s)) {
4132                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
4133                                             s->mem_index, MO_LEUQ);
4134                         tcg_gen_addi_tl(s->A0, s->A0, 8);
4135                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
4136                                             s->mem_index, MO_LEUQ);
4137                     } else {
4138                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
4139                                             s->mem_index, MO_LEUL);
4140                         tcg_gen_addi_tl(s->A0, s->A0, 4);
4141                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
4142                                             s->mem_index, MO_LEUL);
4143                     }
4144                 }
4145             } else if (mod != 3) {
4146                 /* bndstx */
4147                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4148                 if (reg >= 4
4149                     || (prefixes & PREFIX_LOCK)
4150                     || s->aflag == MO_16
4151                     || a.base < -1) {
4152                     goto illegal_op;
4153                 }
4154                 if (a.base >= 0) {
4155                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
4156                 } else {
4157                     tcg_gen_movi_tl(s->A0, 0);
4158                 }
4159                 gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
4160                 if (a.index >= 0) {
4161                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
4162                 } else {
4163                     tcg_gen_movi_tl(s->T0, 0);
4164                 }
4165                 if (CODE64(s)) {
4166                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
4167                                         cpu_bndl[reg], cpu_bndu[reg]);
4168                 } else {
4169                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
4170                                         cpu_bndl[reg], cpu_bndu[reg]);
4171                 }
4172             }
4173         }
4174         gen_nop_modrm(env, s, modrm);
4175         break;
4176 
4177     case 0x120: /* mov reg, crN */
4178     case 0x122: /* mov crN, reg */
4179         if (!check_cpl0(s)) {
4180             break;
4181         }
4182         modrm = x86_ldub_code(env, s);
4183         /*
4184          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
4185          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
4186          * processors all show that the mod bits are assumed to be 1's,
4187          * regardless of actual values.
4188          */
4189         rm = (modrm & 7) | REX_B(s);
4190         reg = ((modrm >> 3) & 7) | REX_R(s);
4191         switch (reg) {
4192         case 0:
4193             if ((prefixes & PREFIX_LOCK) &&
4194                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
4195                 reg = 8;
4196             }
4197             break;
4198         case 2:
4199         case 3:
4200         case 4:
4201         case 8:
4202             break;
4203         default:
4204             goto unknown_op;
4205         }
4206         ot  = (CODE64(s) ? MO_64 : MO_32);
4207 
4208         translator_io_start(&s->base);
4209         if (b & 2) {
4210             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
4211             gen_op_mov_v_reg(s, ot, s->T0, rm);
4212             gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
4213             s->base.is_jmp = DISAS_EOB_NEXT;
4214         } else {
4215             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
4216             gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
4217             gen_op_mov_reg_v(s, ot, rm, s->T0);
4218         }
4219         break;
4220 
4221     case 0x121: /* mov reg, drN */
4222     case 0x123: /* mov drN, reg */
4223         if (check_cpl0(s)) {
4224             modrm = x86_ldub_code(env, s);
4225             /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
4226              * AMD documentation (24594.pdf) and testing of
4227              * Intel 386 and 486 processors all show that the mod bits
4228              * are assumed to be 1's, regardless of actual values.
4229              */
4230             rm = (modrm & 7) | REX_B(s);
4231             reg = ((modrm >> 3) & 7) | REX_R(s);
4232             if (CODE64(s))
4233                 ot = MO_64;
4234             else
4235                 ot = MO_32;
4236             if (reg >= 8) {
4237                 goto illegal_op;
4238             }
4239             if (b & 2) {
4240                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
4241                 gen_op_mov_v_reg(s, ot, s->T0, rm);
4242                 tcg_gen_movi_i32(s->tmp2_i32, reg);
4243                 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
4244                 s->base.is_jmp = DISAS_EOB_NEXT;
4245             } else {
4246                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
4247                 tcg_gen_movi_i32(s->tmp2_i32, reg);
4248                 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
4249                 gen_op_mov_reg_v(s, ot, rm, s->T0);
4250             }
4251         }
4252         break;
4253     case 0x106: /* clts */
4254         if (check_cpl0(s)) {
4255             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
4256             gen_helper_clts(tcg_env);
4257             /* abort block because static cpu state changed */
4258             s->base.is_jmp = DISAS_EOB_NEXT;
4259         }
4260         break;
4261     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
4262     case 0x1ae:
4263         modrm = x86_ldub_code(env, s);
4264         switch (modrm) {
4265         CASE_MODRM_MEM_OP(0): /* fxsave */
4266             if (!(s->cpuid_features & CPUID_FXSR)
4267                 || (prefixes & PREFIX_LOCK)) {
4268                 goto illegal_op;
4269             }
4270             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
4271                 gen_exception(s, EXCP07_PREX);
4272                 break;
4273             }
4274             gen_lea_modrm(env, s, modrm);
4275             gen_helper_fxsave(tcg_env, s->A0);
4276             break;
4277 
4278         CASE_MODRM_MEM_OP(1): /* fxrstor */
4279             if (!(s->cpuid_features & CPUID_FXSR)
4280                 || (prefixes & PREFIX_LOCK)) {
4281                 goto illegal_op;
4282             }
4283             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
4284                 gen_exception(s, EXCP07_PREX);
4285                 break;
4286             }
4287             gen_lea_modrm(env, s, modrm);
4288             gen_helper_fxrstor(tcg_env, s->A0);
4289             break;
4290 
4291         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
4292             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
4293                 goto illegal_op;
4294             }
4295             if (s->flags & HF_TS_MASK) {
4296                 gen_exception(s, EXCP07_PREX);
4297                 break;
4298             }
4299             gen_lea_modrm(env, s, modrm);
4300             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
4301             gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
4302             break;
4303 
4304         CASE_MODRM_MEM_OP(3): /* stmxcsr */
4305             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
4306                 goto illegal_op;
4307             }
4308             if (s->flags & HF_TS_MASK) {
4309                 gen_exception(s, EXCP07_PREX);
4310                 break;
4311             }
4312             gen_helper_update_mxcsr(tcg_env);
4313             gen_lea_modrm(env, s, modrm);
4314             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
4315             gen_op_st_v(s, MO_32, s->T0, s->A0);
4316             break;
4317 
4318         CASE_MODRM_MEM_OP(4): /* xsave */
4319             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
4320                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
4321                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
4322                 goto illegal_op;
4323             }
4324             gen_lea_modrm(env, s, modrm);
4325             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
4326                                   cpu_regs[R_EDX]);
4327             gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
4328             break;
4329 
4330         CASE_MODRM_MEM_OP(5): /* xrstor */
4331             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
4332                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
4333                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
4334                 goto illegal_op;
4335             }
4336             gen_lea_modrm(env, s, modrm);
4337             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
4338                                   cpu_regs[R_EDX]);
4339             gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
4340             /* XRSTOR is how MPX is enabled, which changes how
4341                we translate.  Thus we need to end the TB.  */
4342             s->base.is_jmp = DISAS_EOB_NEXT;
4343             break;
4344 
4345         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
4346             if (prefixes & PREFIX_LOCK) {
4347                 goto illegal_op;
4348             }
4349             if (prefixes & PREFIX_DATA) {
4350                 /* clwb */
4351                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
4352                     goto illegal_op;
4353                 }
4354                 gen_nop_modrm(env, s, modrm);
4355             } else {
4356                 /* xsaveopt */
4357                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
4358                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
4359                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
4360                     goto illegal_op;
4361                 }
4362                 gen_lea_modrm(env, s, modrm);
4363                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
4364                                       cpu_regs[R_EDX]);
4365                 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
4366             }
4367             break;
4368 
4369         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
4370             if (prefixes & PREFIX_LOCK) {
4371                 goto illegal_op;
4372             }
4373             if (prefixes & PREFIX_DATA) {
4374                 /* clflushopt */
4375                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
4376                     goto illegal_op;
4377                 }
4378             } else {
4379                 /* clflush */
4380                 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
4381                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
4382                     goto illegal_op;
4383                 }
4384             }
4385             gen_nop_modrm(env, s, modrm);
4386             break;
4387 
4388         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
4389         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
4390         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
4391         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
4392             if (CODE64(s)
4393                 && (prefixes & PREFIX_REPZ)
4394                 && !(prefixes & PREFIX_LOCK)
4395                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
4396                 TCGv base, treg, src, dst;
4397 
4398                 /* Preserve hflags bits by testing CR4 at runtime.  */
4399                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
4400                 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
4401 
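                /* modrm bit 3 picks the segment (set = GS, clear = FS);
                   modrm bit 4 picks a write vs. a read of the base.  */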
                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;

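        /*
         * SFENCE, LFENCE and MFENCE use mod == 3 and ignore the rm
         * field.  Each maps to a TCG barrier of matching strength:
         * store-store, load-load, or all memory accesses.
         */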
        case 0xf8 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;

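    /*
     * RSM returns from System Management Mode and is undefined
     * outside of SMM; user-mode emulation can never be in SMM.
     */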
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK)) {
            goto illegal_op;
        }
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_helper_rsm(tcg_env);
        assume_cc_op(s, CC_OP_EFLAGS);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
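    /*
     * POPCNT requires the F3 mandatory prefix (decoded as PREFIX_REPZ).
     * The source operand is saved in cc_src so that CC_OP_POPCNT can
     * derive ZF from it; the other arithmetic flags read as zero.
     */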
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ) {
            goto illegal_op;
        }
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) {
            goto illegal_op;
        }

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        ot = dflag;
        gen_ld_modrm(env, s, modrm, ot);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    default:
        g_assert_not_reached();
    }
    return;
 illegal_op:
    gen_illegal_opcode(s);
    return;
 unknown_op:
    gen_unknown_opcode(env, s);
}

#include "decode-new.h"
#include "emit.c.inc"
#include "decode-new.c.inc"

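/*
 * Allocate the TCG globals that alias fields of CPUX86State: the
 * general purpose registers, EIP/RIP, the condition code staging
 * fields, the segment base addresses and the MPX bound registers.
 */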
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(cpu, false);
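    /* Cache the CPUID feature words that are consulted during decode. */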
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
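    /*
     * Direct jumps can be chained with goto_tb only if the TB allows
     * it (no CF_NO_GOTO_TB) and RF, TF and interrupt inhibition are
     * all clear.
     */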
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}

static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();
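    /*
     * With CF_PCREL the translated code can run at any virtual
     * address, so record only the offset of the PC within its page.
     */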
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}

static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

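    /*
     * disas_insn() longjmps back here with 1 when the instruction
     * exceeds the 15-byte limit (raise #GP), and with 2 when a
     * subsequent instruction crosses a page boundary; the latter is
     * undone and retried as the first instruction of a new TB.
     */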
    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        disas_insn(dc, cpu);
        break;
    case 1:
        gen_exception_gpf(dc);
        break;
    case 2:
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        dc->cc_op_dirty = orig_cc_op_dirty;
        dc->cc_op = orig_cc_op;
        dc->pc_save = orig_pc_save;
        /* END TODO */
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte boundary was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * In single-step mode, we generate only one instruction
             * and then raise an exception.
             * If IRQs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
             * the flag and end the translation to give the irqs a
             * chance to happen.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

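    /*
     * Close the TB: DISAS_TOO_MANY simply jumps to the next insn and
     * may chain to the successor TB, while the EOB variants emit a
     * full end-of-block sequence.
     */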
    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
    case DISAS_EOB_INHIBIT_IRQ:
        assert(dc->base.pc_next == dc->pc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
    case DISAS_EOB_RECHECK_TF:
    case DISAS_JUMP:
        gen_eob(dc, dc->base.is_jmp);
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}