xref: /qemu/target/i386/tcg/translate.c (revision 8b7b9c5c)
/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
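
/*
 * For example, CASE_MODRM_MEM_OP(4) matches modrm bytes 0x20..0x27,
 * 0x60..0x67 and 0xa0..0xa7 (mod = 0..2, reg = 4, any rm), while
 * CASE_MODRM_OP(4) additionally matches the register forms 0xe0..0xe7
 * (mod = 3).
 */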

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

    bool has_modrm;
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;
} DisasContext;

#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_SOFTMMU) && !defined(TARGET_X86_64)
#define LMA(S)    false
#else
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need neither sprinkle
 * ifdefs through the translator nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};

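/*
 * For example, moving from CC_OP_SUBB to CC_OP_LOGICB leaves only
 * USES_CC_DST live, so the cc_src and cc_srcT globals may be
 * discarded by set_cc_op() below.
 */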
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
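 * For example, with no REX prefix register number 4 names AH, while
 * with any REX prefix present it names SPL.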
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}

/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register; if DEST is
 * not NULL, store the result into DEST.  In both cases, return the
 * TCGv of the register itself.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

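/*
 * pc_save caches the EIP value currently held in cpu_eip.  With
 * CF_PCREL, cpu_eip is updated by a relative addition so the TB stays
 * position independent; otherwise the absolute EIP (pc - cs_base) is
 * stored directly.
 */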
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
    }
    s->pc_save = s->pc;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set; NT is illegal in 64-bit mode, which is why
     * passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->pc - s->cs_base);
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->base.pc_next - s->cs_base);
    }
}

/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(s->A0, a0);
        a0 = s->A0;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}

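/*
 * The string helpers below pass the architectural defaults to
 * gen_lea_v_seg: loads through ESI honor a segment override, while
 * stores through EDI always use ES (ovr_seg = -1).
 */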
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
    tcg_gen_ld32s_tl(s->T0, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
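        /* MO_64, and MO_32 when target_ulong is 32 bits: the value
           already fills the register, so no extension is needed.  */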
        return src;
    }
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(s->aflag, s->tmp0);
    tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1 << ot) is allowed.
 * If not, raise #GP or take a VMM exit.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

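/*
 * As an example of the encoding: { .cond = TCG_COND_NE,
 * .reg = cpu_cc_src, .mask = CC_C } holds iff (cc_src & CC_C) != 0,
 * i.e. the carry flag as stored by CC_OP_EFLAGS.
 */
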
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
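            /* The width-grouped CC_OP values come in fours (B/W/L/Q),
               so the low two bits of the offset from CC_OP_ADDB give
               the operand size. */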
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
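
/*
 * Each GEN_REPZ/GEN_REPZ2 use above expands to a small wrapper; for
 * instance GEN_REPZ(movs) defines gen_repz_movs(), which just calls
 * gen_repz(s, ot, gen_movs).
 */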

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory.  */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
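            /* Subtract via an atomic add of -T1: the fetched old value
               lands in cc_srcT and T0 is recomputed as old - T1, giving
               the flags the same operands as the non-locked path. */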
1550             tcg_gen_neg_tl(s1->T0, s1->T1);
1551             tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
1552                                         s1->mem_index, ot | MO_LE);
1553             tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
1554         } else {
1555             tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1556             tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1557             gen_op_st_rm_T0_A0(s1, ot, d);
1558         }
1559         gen_op_update2_cc(s1);
1560         set_cc_op(s1, CC_OP_SUBB + ot);
1561         break;
1562     default:
1563     case OP_ANDL:
1564         if (s1->prefix & PREFIX_LOCK) {
1565             tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
1566                                         s1->mem_index, ot | MO_LE);
1567         } else {
1568             tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
1569             gen_op_st_rm_T0_A0(s1, ot, d);
1570         }
1571         gen_op_update1_cc(s1);
1572         set_cc_op(s1, CC_OP_LOGICB + ot);
1573         break;
1574     case OP_ORL:
1575         if (s1->prefix & PREFIX_LOCK) {
1576             tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
1577                                        s1->mem_index, ot | MO_LE);
1578         } else {
1579             tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
1580             gen_op_st_rm_T0_A0(s1, ot, d);
1581         }
1582         gen_op_update1_cc(s1);
1583         set_cc_op(s1, CC_OP_LOGICB + ot);
1584         break;
1585     case OP_XORL:
1586         if (s1->prefix & PREFIX_LOCK) {
1587             tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
1588                                         s1->mem_index, ot | MO_LE);
1589         } else {
1590             tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
1591             gen_op_st_rm_T0_A0(s1, ot, d);
1592         }
1593         gen_op_update1_cc(s1);
1594         set_cc_op(s1, CC_OP_LOGICB + ot);
1595         break;
1596     case OP_CMPL:
1597         tcg_gen_mov_tl(cpu_cc_src, s1->T1);
1598         tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1599         tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
1600         set_cc_op(s1, CC_OP_SUBB + ot);
1601         break;
1602     }
1603 }
1604 
1605 /* if d == OR_TMP0, it means memory operand (address in A0) */
1606 static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
1607 {
1608     if (s1->prefix & PREFIX_LOCK) {
1609         if (d != OR_TMP0) {
1610             /* Lock prefix when destination is not memory */
1611             gen_illegal_opcode(s1);
1612             return;
1613         }
1614         tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
1615         tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1616                                     s1->mem_index, ot | MO_LE);
1617     } else {
1618         if (d != OR_TMP0) {
1619             gen_op_mov_v_reg(s1, ot, s1->T0, d);
1620         } else {
1621             gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1622         }
1623         tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
1624         gen_op_st_rm_T0_A0(s1, ot, d);
1625     }
1626 
1627     gen_compute_eflags_c(s1, cpu_cc_src);
1628     tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
1629     set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
1630 }
1631 
1632 static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
1633                             TCGv shm1, TCGv count, bool is_right)
1634 {
1635     TCGv_i32 z32, s32, oldop;
1636     TCGv z_tl;
1637 
1638     /* Store the results into the CC variables.  If we know that the
1639        variable must be dead, store unconditionally.  Otherwise we'll
1640        need to not disrupt the current contents.  */
1641     z_tl = tcg_constant_tl(0);
1642     if (cc_op_live[s->cc_op] & USES_CC_DST) {
1643         tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1644                            result, cpu_cc_dst);
1645     } else {
1646         tcg_gen_mov_tl(cpu_cc_dst, result);
1647     }
1648     if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1649         tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1650                            shm1, cpu_cc_src);
1651     } else {
1652         tcg_gen_mov_tl(cpu_cc_src, shm1);
1653     }
1654 
1655     /* Get the two potential CC_OP values into temporaries.  */
1656     tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1657     if (s->cc_op == CC_OP_DYNAMIC) {
1658         oldop = cpu_cc_op;
1659     } else {
1660         tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
1661         oldop = s->tmp3_i32;
1662     }
1663 
1664     /* Conditionally store the CC_OP value.  */
1665     z32 = tcg_constant_i32(0);
1666     s32 = tcg_temp_new_i32();
1667     tcg_gen_trunc_tl_i32(s32, count);
1668     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
1669 
1670     /* The CC_OP value is no longer predictable.  */
1671     set_cc_op(s, CC_OP_DYNAMIC);
1672 }
1673 
1674 static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
1675                             int is_right, int is_arith)
1676 {
1677     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1678 
1679     /* load */
1680     if (op1 == OR_TMP0) {
1681         gen_op_ld_v(s, ot, s->T0, s->A0);
1682     } else {
1683         gen_op_mov_v_reg(s, ot, s->T0, op1);
1684     }
1685 
1686     tcg_gen_andi_tl(s->T1, s->T1, mask);
1687     tcg_gen_subi_tl(s->tmp0, s->T1, 1);
1688 
1689     if (is_right) {
1690         if (is_arith) {
1691             gen_exts(ot, s->T0);
1692             tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
1693             tcg_gen_sar_tl(s->T0, s->T0, s->T1);
1694         } else {
1695             gen_extu(ot, s->T0);
1696             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1697             tcg_gen_shr_tl(s->T0, s->T0, s->T1);
1698         }
1699     } else {
1700         tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1701         tcg_gen_shl_tl(s->T0, s->T0, s->T1);
1702     }
1703 
1704     /* store */
1705     gen_op_st_rm_T0_A0(s, ot, op1);
1706 
1707     gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
1708 }
1709 
1710 static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1711                             int is_right, int is_arith)
1712 {
1713     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1714 
1715     /* load */
1716     if (op1 == OR_TMP0)
1717         gen_op_ld_v(s, ot, s->T0, s->A0);
1718     else
1719         gen_op_mov_v_reg(s, ot, s->T0, op1);
1720 
1721     op2 &= mask;
1722     if (op2 != 0) {
1723         if (is_right) {
1724             if (is_arith) {
1725                 gen_exts(ot, s->T0);
1726                 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
1727                 tcg_gen_sari_tl(s->T0, s->T0, op2);
1728             } else {
1729                 gen_extu(ot, s->T0);
1730                 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
1731                 tcg_gen_shri_tl(s->T0, s->T0, op2);
1732             }
1733         } else {
1734             tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
1735             tcg_gen_shli_tl(s->T0, s->T0, op2);
1736         }
1737     }
1738 
1739     /* store */
1740     gen_op_st_rm_T0_A0(s, ot, op1);
1741 
1742     /* update eflags if non-zero shift */
1743     if (op2 != 0) {
1744         tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1745         tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1746         set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1747     }
1748 }
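
/*
 * A worked example of the "shift by one less" trick above: for
 * "shrb $2, %al" with AL = 0b1011, tmp4 = 0b1011 >> 1 = 0b101, whose
 * low bit (1) is exactly the last bit shifted out, i.e. the new CF.
 * The CC_OP_SARB + ot flag computation later recovers CF from that
 * saved CC_SRC value.
 */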
1749 
1750 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
1751 {
1752     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1753     TCGv_i32 t0, t1;
1754 
1755     /* load */
1756     if (op1 == OR_TMP0) {
1757         gen_op_ld_v(s, ot, s->T0, s->A0);
1758     } else {
1759         gen_op_mov_v_reg(s, ot, s->T0, op1);
1760     }
1761 
1762     tcg_gen_andi_tl(s->T1, s->T1, mask);
1763 
1764     switch (ot) {
1765     case MO_8:
1766         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
1767         tcg_gen_ext8u_tl(s->T0, s->T0);
1768         tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
1769         goto do_long;
1770     case MO_16:
1771         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
1772         tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
1773         goto do_long;
1774     do_long:
1775 #ifdef TARGET_X86_64
1776     case MO_32:
1777         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1778         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
1779         if (is_right) {
1780             tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1781         } else {
1782             tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1783         }
1784         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1785         break;
1786 #endif
1787     default:
1788         if (is_right) {
1789             tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
1790         } else {
1791             tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
1792         }
1793         break;
1794     }
1795 
1796     /* store */
1797     gen_op_st_rm_T0_A0(s, ot, op1);
1798 
1799     /* We'll need the flags computed into CC_SRC.  */
1800     gen_compute_eflags(s);
1801 
1802     /* The value that was "rotated out" is now present at the other end
1803        of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1804        since we've computed the flags into CC_SRC, these variables are
1805        currently dead.  */
1806     if (is_right) {
1807         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1808         tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1809         tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1810     } else {
1811         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1812         tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1813     }
1814     tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1815     tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1816 
1817     /* Now conditionally store the new CC_OP value.  If the shift count
1818        is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1819        Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1820        exactly as we computed above.  */
1821     t0 = tcg_constant_i32(0);
1822     t1 = tcg_temp_new_i32();
1823     tcg_gen_trunc_tl_i32(t1, s->T1);
1824     tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
1825     tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
1826     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1827                         s->tmp2_i32, s->tmp3_i32);
1828 
1829     /* The CC_OP value is no longer predictable.  */
1830     set_cc_op(s, CC_OP_DYNAMIC);
1831 }
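
/*
 * Example of the C/O recovery above: rotating AL = 0x81 left by 1
 * yields 0x03.  CF is the bit that wrapped around, i.e. the new low
 * bit (1), and OF is CF XOR the new sign bit (1 ^ 0 = 1); both land in
 * the split C/O layout that CC_OP_ADCOX expects.
 */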
1832 
1833 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1834                           int is_right)
1835 {
1836     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1837     int shift;
1838 
1839     /* load */
1840     if (op1 == OR_TMP0) {
1841         gen_op_ld_v(s, ot, s->T0, s->A0);
1842     } else {
1843         gen_op_mov_v_reg(s, ot, s->T0, op1);
1844     }
1845 
1846     op2 &= mask;
1847     if (op2 != 0) {
1848         switch (ot) {
1849 #ifdef TARGET_X86_64
1850         case MO_32:
1851             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1852             if (is_right) {
1853                 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
1854             } else {
1855                 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
1856             }
1857             tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1858             break;
1859 #endif
1860         default:
1861             if (is_right) {
1862                 tcg_gen_rotri_tl(s->T0, s->T0, op2);
1863             } else {
1864                 tcg_gen_rotli_tl(s->T0, s->T0, op2);
1865             }
1866             break;
1867         case MO_8:
1868             mask = 7;
1869             goto do_shifts;
1870         case MO_16:
1871             mask = 15;
1872         do_shifts:
1873             shift = op2 & mask;
1874             if (is_right) {
1875                 shift = mask + 1 - shift;
1876             }
1877             gen_extu(ot, s->T0);
1878             tcg_gen_shli_tl(s->tmp0, s->T0, shift);
1879             tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
1880             tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
1881             break;
1882         }
1883     }
1884 
1885     /* store */
1886     gen_op_st_rm_T0_A0(s, ot, op1);
1887 
1888     if (op2 != 0) {
1889         /* Compute the flags into CC_SRC.  */
1890         gen_compute_eflags(s);
1891 
1892         /* The value that was "rotated out" is now present at the other end
1893            of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1894            since we've computed the flags into CC_SRC, these variables are
1895            currently dead.  */
1896         if (is_right) {
1897             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1898             tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1899             tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1900         } else {
1901             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1902             tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1903         }
1904         tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1905         tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1906         set_cc_op(s, CC_OP_ADCOX);
1907     }
1908 }
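
/*
 * The do_shifts path relies on the identity that rotating an N-bit
 * value right by k equals rotating it left by N - k (e.g. "rorb $3" is
 * "rolb $5"), so both directions reduce to the single shl/shr/or
 * sequence above.
 */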
1909 
1910 /* XXX: add faster immediate = 1 case */
1911 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1912                            int is_right)
1913 {
1914     gen_compute_eflags(s);
1915     assert(s->cc_op == CC_OP_EFLAGS);
1916 
1917     /* load */
1918     if (op1 == OR_TMP0)
1919         gen_op_ld_v(s, ot, s->T0, s->A0);
1920     else
1921         gen_op_mov_v_reg(s, ot, s->T0, op1);
1922 
1923     if (is_right) {
1924         switch (ot) {
1925         case MO_8:
1926             gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
1927             break;
1928         case MO_16:
1929             gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
1930             break;
1931         case MO_32:
1932             gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
1933             break;
1934 #ifdef TARGET_X86_64
1935         case MO_64:
1936             gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
1937             break;
1938 #endif
1939         default:
1940             g_assert_not_reached();
1941         }
1942     } else {
1943         switch (ot) {
1944         case MO_8:
1945             gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
1946             break;
1947         case MO_16:
1948             gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
1949             break;
1950         case MO_32:
1951             gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
1952             break;
1953 #ifdef TARGET_X86_64
1954         case MO_64:
1955             gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
1956             break;
1957 #endif
1958         default:
1959             g_assert_not_reached();
1960         }
1961     }
1962     /* store */
1963     gen_op_st_rm_T0_A0(s, ot, op1);
1964 }
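
/*
 * RCL and RCR rotate through the carry flag, so an 8-bit operand acts
 * as a 9-bit rotate of {CF, b7..b0}, with the count taken modulo 9.
 * That extra bit is why these are left to out-of-line helpers rather
 * than being open-coded like ROL/ROR.
 */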
1965 
1966 /* XXX: add faster immediate case */
1967 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
1968                              bool is_right, TCGv count_in)
1969 {
1970     target_ulong mask = (ot == MO_64 ? 63 : 31);
1971     TCGv count;
1972 
1973     /* load */
1974     if (op1 == OR_TMP0) {
1975         gen_op_ld_v(s, ot, s->T0, s->A0);
1976     } else {
1977         gen_op_mov_v_reg(s, ot, s->T0, op1);
1978     }
1979 
1980     count = tcg_temp_new();
1981     tcg_gen_andi_tl(count, count_in, mask);
1982 
1983     switch (ot) {
1984     case MO_16:
1985         /* Note: we implement the Intel behaviour for shift count > 16.
1986            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
1987            portion by constructing it as a 32-bit value.  */
1988         if (is_right) {
1989             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
1990             tcg_gen_mov_tl(s->T1, s->T0);
1991             tcg_gen_mov_tl(s->T0, s->tmp0);
1992         } else {
1993             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
1994         }
1995         /*
1996          * If TARGET_X86_64 is defined then fall through into the MO_32
1997          * case; otherwise fall through to the default case.
1998          */
1999     case MO_32:
2000 #ifdef TARGET_X86_64
2001         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
2002         tcg_gen_subi_tl(s->tmp0, count, 1);
2003         if (is_right) {
2004             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
2005             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
2006             tcg_gen_shr_i64(s->T0, s->T0, count);
2007         } else {
2008             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
2009             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
2010             tcg_gen_shl_i64(s->T0, s->T0, count);
2011             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
2012             tcg_gen_shri_i64(s->T0, s->T0, 32);
2013         }
2014         break;
2015 #endif
2016     default:
2017         tcg_gen_subi_tl(s->tmp0, count, 1);
2018         if (is_right) {
2019             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
2020 
2021             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2022             tcg_gen_shr_tl(s->T0, s->T0, count);
2023             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
2024         } else {
2025             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
2026             if (ot == MO_16) {
2027                 /* Only needed if count > 16, for Intel behaviour.  */
2028                 tcg_gen_subfi_tl(s->tmp4, 33, count);
2029                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2030                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
2031             }
2032 
2033             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2034             tcg_gen_shl_tl(s->T0, s->T0, count);
2035             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
2036         }
2037         tcg_gen_movi_tl(s->tmp4, 0);
2038         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2039                            s->tmp4, s->T1);
2040         tcg_gen_or_tl(s->T0, s->T0, s->T1);
2041         break;
2042     }
2043 
2044     /* store */
2045     gen_op_st_rm_T0_A0(s, ot, op1);
2046 
2047     gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
2048 }
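
/*
 * Illustration of the Intel count > 16 behaviour implemented above:
 * for "shrdw $20, %bx, %ax" the shifted quantity is the 48-bit
 * concatenation AX:BX:AX, so the topmost result bits are drawn from
 * the destination register again instead of being left undefined.
 */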
2049 
2050 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2051 {
2052     if (s != OR_TMP1)
2053         gen_op_mov_v_reg(s1, ot, s1->T1, s);
2054     switch(op) {
2055     case OP_ROL:
2056         gen_rot_rm_T1(s1, ot, d, 0);
2057         break;
2058     case OP_ROR:
2059         gen_rot_rm_T1(s1, ot, d, 1);
2060         break;
2061     case OP_SHL:
2062     case OP_SHL1:
2063         gen_shift_rm_T1(s1, ot, d, 0, 0);
2064         break;
2065     case OP_SHR:
2066         gen_shift_rm_T1(s1, ot, d, 1, 0);
2067         break;
2068     case OP_SAR:
2069         gen_shift_rm_T1(s1, ot, d, 1, 1);
2070         break;
2071     case OP_RCL:
2072         gen_rotc_rm_T1(s1, ot, d, 0);
2073         break;
2074     case OP_RCR:
2075         gen_rotc_rm_T1(s1, ot, d, 1);
2076         break;
2077     }
2078 }
2079 
2080 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2081 {
2082     switch(op) {
2083     case OP_ROL:
2084         gen_rot_rm_im(s1, ot, d, c, 0);
2085         break;
2086     case OP_ROR:
2087         gen_rot_rm_im(s1, ot, d, c, 1);
2088         break;
2089     case OP_SHL:
2090     case OP_SHL1:
2091         gen_shift_rm_im(s1, ot, d, c, 0, 0);
2092         break;
2093     case OP_SHR:
2094         gen_shift_rm_im(s1, ot, d, c, 1, 0);
2095         break;
2096     case OP_SAR:
2097         gen_shift_rm_im(s1, ot, d, c, 1, 1);
2098         break;
2099     default:
2100         /* currently not optimized */
2101         tcg_gen_movi_tl(s1->T1, c);
2102         gen_shift(s1, op, ot, d, OR_TMP1);
2103         break;
2104     }
2105 }
2106 
2107 #define X86_MAX_INSN_LENGTH 15
2108 
2109 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2110 {
2111     uint64_t pc = s->pc;
2112 
2113     /* This is a subsequent insn that crosses a page boundary.  */
2114     if (s->base.num_insns > 1 &&
2115         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2116         siglongjmp(s->jmpbuf, 2);
2117     }
2118 
2119     s->pc += num_bytes;
2120     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
2121         /* If the instruction's 16th byte is on a different page than the 1st, a
2122          * page fault on the second page wins over the general protection fault
2123          * caused by the instruction being too long.
2124          * This can happen even if the operand is only one byte long!
2125          */
2126         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2127             volatile uint8_t unused =
2128                 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2129             (void) unused;
2130         }
2131         siglongjmp(s->jmpbuf, 1);
2132     }
2133 
2134     return pc;
2135 }
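
/*
 * Concretely: if an insn starts 5 bytes before the end of a page and
 * the decoder asks for a 16th byte, bytes 6..16 lie on the next page.
 * The dummy cpu_ldub_code of that page's first byte raises #PF there
 * if it is unmapped; only when that load succeeds does the siglongjmp
 * deliver the #GP for an over-long instruction.
 */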
2136 
2137 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2138 {
2139     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
2140 }
2141 
2142 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2143 {
2144     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2145 }
2146 
2147 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2148 {
2149     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2150 }
2151 
2152 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2153 {
2154     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
2155 }
2156 
2157 #ifdef TARGET_X86_64
2158 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2159 {
2160     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
2161 }
2162 #endif
2163 
2164 /* Decompose an address.  */
2165 
2166 typedef struct AddressParts {
2167     int def_seg;
2168     int base;
2169     int index;
2170     int scale;
2171     target_long disp;
2172 } AddressParts;
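
/*
 * For example, "mov 0x10(%esi,%ebx,4), %eax" (8b 44 9e 10) decomposes
 * into { .def_seg = R_DS, .base = R_ESI, .index = R_EBX, .scale = 2,
 * .disp = 0x10 }: scale holds log2 of the multiplier, and base/index
 * are negative when absent.
 */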
2173 
2174 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2175                                     int modrm)
2176 {
2177     int def_seg, base, index, scale, mod, rm;
2178     target_long disp;
2179     bool havesib;
2180 
2181     def_seg = R_DS;
2182     index = -1;
2183     scale = 0;
2184     disp = 0;
2185 
2186     mod = (modrm >> 6) & 3;
2187     rm = modrm & 7;
2188     base = rm | REX_B(s);
2189 
2190     if (mod == 3) {
2191         /* Normally filtered out earlier, but including this path
2192            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
2193         goto done;
2194     }
2195 
2196     switch (s->aflag) {
2197     case MO_64:
2198     case MO_32:
2199         havesib = 0;
2200         if (rm == 4) {
2201             int code = x86_ldub_code(env, s);
2202             scale = (code >> 6) & 3;
2203             index = ((code >> 3) & 7) | REX_X(s);
2204             if (index == 4) {
2205                 index = -1;  /* no index */
2206             }
2207             base = (code & 7) | REX_B(s);
2208             havesib = 1;
2209         }
2210 
2211         switch (mod) {
2212         case 0:
2213             if ((base & 7) == 5) {
2214                 base = -1;
2215                 disp = (int32_t)x86_ldl_code(env, s);
2216                 if (CODE64(s) && !havesib) {
2217                     base = -2;
2218                     disp += s->pc + s->rip_offset;
2219                 }
2220             }
2221             break;
2222         case 1:
2223             disp = (int8_t)x86_ldub_code(env, s);
2224             break;
2225         default:
2226         case 2:
2227             disp = (int32_t)x86_ldl_code(env, s);
2228             break;
2229         }
2230 
2231         /* For correct popl handling with esp.  */
2232         if (base == R_ESP && s->popl_esp_hack) {
2233             disp += s->popl_esp_hack;
2234         }
2235         if (base == R_EBP || base == R_ESP) {
2236             def_seg = R_SS;
2237         }
2238         break;
2239 
2240     case MO_16:
2241         if (mod == 0) {
2242             if (rm == 6) {
2243                 base = -1;
2244                 disp = x86_lduw_code(env, s);
2245                 break;
2246             }
2247         } else if (mod == 1) {
2248             disp = (int8_t)x86_ldub_code(env, s);
2249         } else {
2250             disp = (int16_t)x86_lduw_code(env, s);
2251         }
2252 
2253         switch (rm) {
2254         case 0:
2255             base = R_EBX;
2256             index = R_ESI;
2257             break;
2258         case 1:
2259             base = R_EBX;
2260             index = R_EDI;
2261             break;
2262         case 2:
2263             base = R_EBP;
2264             index = R_ESI;
2265             def_seg = R_SS;
2266             break;
2267         case 3:
2268             base = R_EBP;
2269             index = R_EDI;
2270             def_seg = R_SS;
2271             break;
2272         case 4:
2273             base = R_ESI;
2274             break;
2275         case 5:
2276             base = R_EDI;
2277             break;
2278         case 6:
2279             base = R_EBP;
2280             def_seg = R_SS;
2281             break;
2282         default:
2283         case 7:
2284             base = R_EBX;
2285             break;
2286         }
2287         break;
2288 
2289     default:
2290         g_assert_not_reached();
2291     }
2292 
2293  done:
2294     return (AddressParts){ def_seg, base, index, scale, disp };
2295 }
2296 
2297 /* Compute the address, with a minimum number of TCG ops.  */
2298 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
2299 {
2300     TCGv ea = NULL;
2301 
2302     if (a.index >= 0 && !is_vsib) {
2303         if (a.scale == 0) {
2304             ea = cpu_regs[a.index];
2305         } else {
2306             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2307             ea = s->A0;
2308         }
2309         if (a.base >= 0) {
2310             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2311             ea = s->A0;
2312         }
2313     } else if (a.base >= 0) {
2314         ea = cpu_regs[a.base];
2315     }
2316     if (!ea) {
2317         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
2318             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2319             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2320         } else {
2321             tcg_gen_movi_tl(s->A0, a.disp);
2322         }
2323         ea = s->A0;
2324     } else if (a.disp != 0) {
2325         tcg_gen_addi_tl(s->A0, ea, a.disp);
2326         ea = s->A0;
2327     }
2328 
2329     return ea;
2330 }
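
/*
 * Note the zero-op fast paths above: a bare register operand such as
 * "(%ebx)" returns cpu_regs[R_EBX] directly without touching A0, and
 * the displacement is only added when it is non-zero.
 */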
2331 
2332 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2333 {
2334     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2335     TCGv ea = gen_lea_modrm_1(s, a, false);
2336     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2337 }
2338 
2339 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2340 {
2341     (void)gen_lea_modrm_0(env, s, modrm);
2342 }
2343 
2344 /* Used for BNDCL, BNDCU, BNDCN.  */
2345 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2346                       TCGCond cond, TCGv_i64 bndv)
2347 {
2348     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2349     TCGv ea = gen_lea_modrm_1(s, a, false);
2350 
2351     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
2352     if (!CODE64(s)) {
2353         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
2354     }
2355     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2356     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
2357     gen_helper_bndck(tcg_env, s->tmp2_i32);
2358 }
2359 
2360 /* used for LEA and MOV AX, mem */
2361 static void gen_add_A0_ds_seg(DisasContext *s)
2362 {
2363     gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
2364 }
2365 
2366 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2367    OR_TMP0 */
2368 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2369                            MemOp ot, int reg, int is_store)
2370 {
2371     int mod, rm;
2372 
2373     mod = (modrm >> 6) & 3;
2374     rm = (modrm & 7) | REX_B(s);
2375     if (mod == 3) {
2376         if (is_store) {
2377             if (reg != OR_TMP0)
2378                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2379             gen_op_mov_reg_v(s, ot, rm, s->T0);
2380         } else {
2381             gen_op_mov_v_reg(s, ot, s->T0, rm);
2382             if (reg != OR_TMP0)
2383                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2384         }
2385     } else {
2386         gen_lea_modrm(env, s, modrm);
2387         if (is_store) {
2388             if (reg != OR_TMP0)
2389                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2390             gen_op_st_v(s, ot, s->T0, s->A0);
2391         } else {
2392             gen_op_ld_v(s, ot, s->T0, s->A0);
2393             if (reg != OR_TMP0)
2394                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2395         }
2396     }
2397 }
2398 
2399 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2400 {
2401     target_ulong ret;
2402 
2403     switch (ot) {
2404     case MO_8:
2405         ret = x86_ldub_code(env, s);
2406         break;
2407     case MO_16:
2408         ret = x86_lduw_code(env, s);
2409         break;
2410     case MO_32:
2411         ret = x86_ldl_code(env, s);
2412         break;
2413 #ifdef TARGET_X86_64
2414     case MO_64:
2415         ret = x86_ldq_code(env, s);
2416         break;
2417 #endif
2418     default:
2419         g_assert_not_reached();
2420     }
2421     return ret;
2422 }
2423 
2424 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2425 {
2426     uint32_t ret;
2427 
2428     switch (ot) {
2429     case MO_8:
2430         ret = x86_ldub_code(env, s);
2431         break;
2432     case MO_16:
2433         ret = x86_lduw_code(env, s);
2434         break;
2435     case MO_32:
2436 #ifdef TARGET_X86_64
2437     case MO_64:
2438 #endif
2439         ret = x86_ldl_code(env, s);
2440         break;
2441     default:
2442         g_assert_not_reached();
2443     }
2444     return ret;
2445 }
2446 
2447 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2448 {
2449     target_long ret;
2450 
2451     switch (ot) {
2452     case MO_8:
2453         ret = (int8_t) x86_ldub_code(env, s);
2454         break;
2455     case MO_16:
2456         ret = (int16_t) x86_lduw_code(env, s);
2457         break;
2458     case MO_32:
2459         ret = (int32_t) x86_ldl_code(env, s);
2460         break;
2461 #ifdef TARGET_X86_64
2462     case MO_64:
2463         ret = x86_ldq_code(env, s);
2464         break;
2465 #endif
2466     default:
2467         g_assert_not_reached();
2468     }
2469     return ret;
2470 }
2471 
2472 static inline int insn_const_size(MemOp ot)
2473 {
2474     if (ot <= MO_32) {
2475         return 1 << ot;
2476     } else {
2477         return 4;
2478     }
2479 }
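
/*
 * MO_64 maps to 4 here because 64-bit insns take at most a
 * sign-extended 32-bit immediate; the lone exception, "mov $imm64, %reg"
 * (REX.W + B8+rd), fetches its 8-byte immediate separately.
 */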
2480 
2481 static void gen_jcc(DisasContext *s, int b, int diff)
2482 {
2483     TCGLabel *l1 = gen_new_label();
2484 
2485     gen_jcc1(s, b, l1);
2486     gen_jmp_rel_csize(s, 0, 1);
2487     gen_set_label(l1);
2488     gen_jmp_rel(s, s->dflag, diff, 0);
2489 }
2490 
2491 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
2492                         int modrm, int reg)
2493 {
2494     CCPrepare cc;
2495 
2496     gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2497 
2498     cc = gen_prepare_cc(s, b, s->T1);
2499     if (cc.mask != -1) {
2500         TCGv t0 = tcg_temp_new();
2501         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2502         cc.reg = t0;
2503     }
2504     if (!cc.use_reg2) {
2505         cc.reg2 = tcg_constant_tl(cc.imm);
2506     }
2507 
2508     tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
2509                        s->T0, cpu_regs[reg]);
2510     gen_op_mov_reg_v(s, ot, reg, s->T0);
2511 }
2512 
2513 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
2514 {
2515     tcg_gen_ld32u_tl(s->T0, tcg_env,
2516                      offsetof(CPUX86State,segs[seg_reg].selector));
2517 }
2518 
2519 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
2520 {
2521     tcg_gen_ext16u_tl(s->T0, s->T0);
2522     tcg_gen_st32_tl(s->T0, tcg_env,
2523                     offsetof(CPUX86State,segs[seg_reg].selector));
2524     tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
2525 }
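
/*
 * This implements the real/VM86-mode rule that the segment base is the
 * selector shifted left by 4: loading 0x1234 into DS yields a base of
 * 0x12340, with no descriptor-table access.
 */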
2526 
2527 /* Move T0 to seg_reg, and determine whether the CPU state may change.
2528    Never call this function with seg_reg == R_CS.  */
2529 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2530 {
2531     if (PE(s) && !VM86(s)) {
2532         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2533         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
2534         /* Abort translation because the addseg value may change or
2535            because ss32 may change.  For R_SS, translation must always
2536            stop, because special handling is needed to inhibit hardware
2537            interrupts for the next instruction.  */
2538         if (seg_reg == R_SS) {
2539             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2540         } else if (CODE32(s) && seg_reg < R_FS) {
2541             s->base.is_jmp = DISAS_EOB_NEXT;
2542         }
2543     } else {
2544         gen_op_movl_seg_T0_vm(s, seg_reg);
2545         if (seg_reg == R_SS) {
2546             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2547         }
2548     }
2549 }
2550 
2551 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2552 {
2553     /* No SVM active; fast case.  */
2554     if (likely(!GUEST(s))) {
2555         return;
2556     }
2557     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2558 }
2559 
2560 static inline void gen_stack_update(DisasContext *s, int addend)
2561 {
2562     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2563 }
2564 
2565 /* Generate a push. It depends on ss32, addseg and dflag.  */
2566 static void gen_push_v(DisasContext *s, TCGv val)
2567 {
2568     MemOp d_ot = mo_pushpop(s, s->dflag);
2569     MemOp a_ot = mo_stacksize(s);
2570     int size = 1 << d_ot;
2571     TCGv new_esp = s->A0;
2572 
2573     tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2574 
2575     if (!CODE64(s)) {
2576         if (ADDSEG(s)) {
2577             new_esp = s->tmp4;
2578             tcg_gen_mov_tl(new_esp, s->A0);
2579         }
2580         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2581     }
2582 
2583     gen_op_st_v(s, d_ot, val, s->A0);
2584     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2585 }
2586 
2587 /* A two-step pop is necessary for precise exceptions.  */
2588 static MemOp gen_pop_T0(DisasContext *s)
2589 {
2590     MemOp d_ot = mo_pushpop(s, s->dflag);
2591 
2592     gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2593     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2594 
2595     return d_ot;
2596 }
2597 
2598 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2599 {
2600     gen_stack_update(s, 1 << ot);
2601 }
2602 
2603 static inline void gen_stack_A0(DisasContext *s)
2604 {
2605     gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2606 }
2607 
2608 static void gen_pusha(DisasContext *s)
2609 {
2610     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2611     MemOp d_ot = s->dflag;
2612     int size = 1 << d_ot;
2613     int i;
2614 
2615     for (i = 0; i < 8; i++) {
2616         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2617         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2618         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2619     }
2620 
2621     gen_stack_update(s, -8 * size);
2622 }
2623 
2624 static void gen_popa(DisasContext *s)
2625 {
2626     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2627     MemOp d_ot = s->dflag;
2628     int size = 1 << d_ot;
2629     int i;
2630 
2631     for (i = 0; i < 8; i++) {
2632         /* ESP is not reloaded */
2633         if (7 - i == R_ESP) {
2634             continue;
2635         }
2636         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2637         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2638         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2639         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2640     }
2641 
2642     gen_stack_update(s, 8 * size);
2643 }
2644 
2645 static void gen_enter(DisasContext *s, int esp_addend, int level)
2646 {
2647     MemOp d_ot = mo_pushpop(s, s->dflag);
2648     MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2649     int size = 1 << d_ot;
2650 
2651     /* Push BP; compute FrameTemp into T1.  */
2652     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2653     gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2654     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2655 
2656     level &= 31;
2657     if (level != 0) {
2658         int i;
2659 
2660         /* Copy level-1 pointers from the previous frame.  */
2661         for (i = 1; i < level; ++i) {
2662             tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2663             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2664             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2665 
2666             tcg_gen_subi_tl(s->A0, s->T1, size * i);
2667             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2668             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2669         }
2670 
2671         /* Push the current FrameTemp as the last level.  */
2672         tcg_gen_subi_tl(s->A0, s->T1, size * level);
2673         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2674         gen_op_st_v(s, d_ot, s->T1, s->A0);
2675     }
2676 
2677     /* Copy the FrameTemp value to EBP.  */
2678     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2679 
2680     /* Compute the final value of ESP.  */
2681     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2682     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2683 }
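
/*
 * For example, "enter $16, $2" with 32-bit operands: push EBP and take
 * FrameTemp = new ESP; copy one saved frame pointer from [EBP - 4] to
 * [FrameTemp - 4]; store FrameTemp itself at [FrameTemp - 8]; finally
 * EBP = FrameTemp and ESP = FrameTemp - 16 - 8.
 */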
2684 
2685 static void gen_leave(DisasContext *s)
2686 {
2687     MemOp d_ot = mo_pushpop(s, s->dflag);
2688     MemOp a_ot = mo_stacksize(s);
2689 
2690     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2691     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2692 
2693     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2694 
2695     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2696     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2697 }
2698 
2699 /* Similarly, except that the assumption here is that we don't decode
2700    the instruction at all -- either a missing opcode, an unimplemented
2701    feature, or just a bogus instruction stream.  */
2702 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2703 {
2704     gen_illegal_opcode(s);
2705 
2706     if (qemu_loglevel_mask(LOG_UNIMP)) {
2707         FILE *logfile = qemu_log_trylock();
2708         if (logfile) {
2709             target_ulong pc = s->base.pc_next, end = s->pc;
2710 
2711             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2712             for (; pc < end; ++pc) {
2713                 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2714             }
2715             fprintf(logfile, "\n");
2716             qemu_log_unlock(logfile);
2717         }
2718     }
2719 }
2720 
2721 /* an interrupt is different from an exception because of the
2722    privilege checks */
2723 static void gen_interrupt(DisasContext *s, int intno)
2724 {
2725     gen_update_cc_op(s);
2726     gen_update_eip_cur(s);
2727     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2728                                cur_insn_len_i32(s));
2729     s->base.is_jmp = DISAS_NORETURN;
2730 }
2731 
2732 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2733 {
2734     if ((s->flags & mask) == 0) {
2735         TCGv_i32 t = tcg_temp_new_i32();
2736         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2737         tcg_gen_ori_i32(t, t, mask);
2738         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2739         s->flags |= mask;
2740     }
2741 }
2742 
2743 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2744 {
2745     if (s->flags & mask) {
2746         TCGv_i32 t = tcg_temp_new_i32();
2747         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2748         tcg_gen_andi_i32(t, t, ~mask);
2749         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2750         s->flags &= ~mask;
2751     }
2752 }
2753 
2754 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2755 {
2756     TCGv t = tcg_temp_new();
2757 
2758     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2759     tcg_gen_ori_tl(t, t, mask);
2760     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2761 }
2762 
2763 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2764 {
2765     TCGv t = tcg_temp_new();
2766 
2767     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2768     tcg_gen_andi_tl(t, t, ~mask);
2769     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2770 }
2771 
2772 /* Clear BND registers during legacy branches.  */
2773 static void gen_bnd_jmp(DisasContext *s)
2774 {
2775     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2776        and if the BNDREGs are known to be in use (non-zero) already.
2777        The helper itself will check BNDPRESERVE at runtime.  */
2778     if ((s->prefix & PREFIX_REPNZ) == 0
2779         && (s->flags & HF_MPX_EN_MASK) != 0
2780         && (s->flags & HF_MPX_IU_MASK) != 0) {
2781         gen_helper_bnd_jmp(tcg_env);
2782     }
2783 }
2784 
2785 /* Generate an end of block.  A trace exception is also generated if needed.
2786    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2787    If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2788    S->TF.  This is used by the syscall/sysret insns.  */
2789 static void
2790 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2791 {
2792     gen_update_cc_op(s);
2793 
2794     /* If several instructions disable interrupts, only the first does it.  */
2795     if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2796         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2797     } else {
2798         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2799     }
2800 
2801     if (s->base.tb->flags & HF_RF_MASK) {
2802         gen_reset_eflags(s, RF_MASK);
2803     }
2804     if (recheck_tf) {
2805         gen_helper_rechecking_single_step(tcg_env);
2806         tcg_gen_exit_tb(NULL, 0);
2807     } else if (s->flags & HF_TF_MASK) {
2808         gen_helper_single_step(tcg_env);
2809     } else if (jr) {
2810         tcg_gen_lookup_and_goto_ptr();
2811     } else {
2812         tcg_gen_exit_tb(NULL, 0);
2813     }
2814     s->base.is_jmp = DISAS_NORETURN;
2815 }
2816 
2817 static inline void
2818 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2819 {
2820     do_gen_eob_worker(s, inhibit, recheck_tf, false);
2821 }
2822 
2823 /* End of block.
2824    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
2825 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2826 {
2827     gen_eob_worker(s, inhibit, false);
2828 }
2829 
2830 /* End of block, resetting the inhibit irq flag.  */
2831 static void gen_eob(DisasContext *s)
2832 {
2833     gen_eob_worker(s, false, false);
2834 }
2835 
2836 /* Jump to register */
2837 static void gen_jr(DisasContext *s)
2838 {
2839     do_gen_eob_worker(s, false, false, true);
2840 }
2841 
2842 /* Jump to eip+diff, truncating the result to OT. */
2843 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2844 {
2845     bool use_goto_tb = s->jmp_opt;
2846     target_ulong mask = -1;
2847     target_ulong new_pc = s->pc + diff;
2848     target_ulong new_eip = new_pc - s->cs_base;
2849 
2850     /* In 64-bit mode, operand size is fixed at 64 bits. */
2851     if (!CODE64(s)) {
2852         if (ot == MO_16) {
2853             mask = 0xffff;
2854             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2855                 use_goto_tb = false;
2856             }
2857         } else {
2858             mask = 0xffffffff;
2859         }
2860     }
2861     new_eip &= mask;
2862 
2863     gen_update_cc_op(s);
2864     set_cc_op(s, CC_OP_DYNAMIC);
2865 
2866     if (tb_cflags(s->base.tb) & CF_PCREL) {
2867         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2868         /*
2869          * If we can prove the branch does not leave the page and we have
2870          * no extra masking to apply (data16 branch in code32, see above),
2871          * then we have also proven that the addition does not wrap.
2872          */
2873         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2874             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2875             use_goto_tb = false;
2876         }
2877     }
2878 
2879     if (use_goto_tb &&
2880         translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
2881         /* jump to same page: we can use a direct jump */
2882         tcg_gen_goto_tb(tb_num);
2883         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2884             tcg_gen_movi_tl(cpu_eip, new_eip);
2885         }
2886         tcg_gen_exit_tb(s->base.tb, tb_num);
2887         s->base.is_jmp = DISAS_NORETURN;
2888     } else {
2889         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2890             tcg_gen_movi_tl(cpu_eip, new_eip);
2891         }
2892         if (s->jmp_opt) {
2893             gen_jr(s);   /* jump to another page */
2894         } else {
2895             gen_eob(s);  /* exit to main loop */
2896         }
2897     }
2898 }
2899 
2900 /* Jump to eip+diff, truncating to the current code size. */
2901 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2902 {
2903     /* CODE64 ignores the OT argument, so we need not consider it. */
2904     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2905 }
2906 
2907 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2908 {
2909     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2910     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2911 }
2912 
2913 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2914 {
2915     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2916     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2917 }
2918 
2919 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2920 {
2921     int mem_index = s->mem_index;
2922     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2923                         MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2924     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(0)));
2925     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2926     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2927     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(1)));
2928 }
2929 
2930 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2931 {
2932     int mem_index = s->mem_index;
2933     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(0)));
2934     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2935                         MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2936     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2937     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(XMMReg, XMM_Q(1)));
2938     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2939 }
2940 
2941 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2942 {
2943     int mem_index = s->mem_index;
2944     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2945                         MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2946     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(0)));
2947     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2948     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2949     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(1)));
2950 
2951     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2952     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2953     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(2)));
2954     tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2955     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2956     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(3)));
2957 }
2958 
2959 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2960 {
2961     int mem_index = s->mem_index;
2962     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(0)));
2963     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2964                         MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2965     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2966     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(1)));
2967     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2968     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2969     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(2)));
2970     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2971     tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2972     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset + offsetof(YMMReg, YMM_Q(3)));
2973     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2974 }
2975 
2976 #include "decode-new.h"
2977 #include "emit.c.inc"
2978 #include "decode-new.c.inc"
2979 
2980 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2981 {
2982     TCGv_i64 cmp, val, old;
2983     TCGv Z;
2984 
2985     gen_lea_modrm(env, s, modrm);
2986 
2987     cmp = tcg_temp_new_i64();
2988     val = tcg_temp_new_i64();
2989     old = tcg_temp_new_i64();
2990 
2991     /* Construct the comparison values from the register pair. */
2992     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2993     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2994 
2995     /* Only require atomic with LOCK; non-parallel handled in generator. */
2996     if (s->prefix & PREFIX_LOCK) {
2997         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
2998     } else {
2999         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
3000                                       s->mem_index, MO_TEUQ);
3001     }
3002 
3003     /* Compute the required value of Z from the comparison result. */
3004     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3005     Z = tcg_temp_new();
3006     tcg_gen_trunc_i64_tl(Z, cmp);
3007 
3008     /*
3009      * Extract the result values for the register pair.
3010      * For 32-bit, we may do this unconditionally, because on success (Z=1),
3011      * the old value matches the previous value in EDX:EAX.  For x86_64,
3012      * the store must be conditional, because we must leave the source
3013      * registers unchanged on success, and zero-extend the writeback
3014      * on failure (Z=0).
3015      */
3016     if (TARGET_LONG_BITS == 32) {
3017         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3018     } else {
3019         TCGv zero = tcg_constant_tl(0);
3020 
3021         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3022         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3023                            s->T0, cpu_regs[R_EAX]);
3024         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3025                            s->T1, cpu_regs[R_EDX]);
3026     }
3027 
3028     /* Update Z. */
3029     gen_compute_eflags(s);
3030     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
3031 }
3032 
3033 #ifdef TARGET_X86_64
3034 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3035 {
3036     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3037     TCGv_i64 t0, t1;
3038     TCGv_i128 cmp, val;
3039 
3040     gen_lea_modrm(env, s, modrm);
3041 
3042     cmp = tcg_temp_new_i128();
3043     val = tcg_temp_new_i128();
3044     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3045     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3046 
3047     /* Only require atomic with LOCK; non-parallel handled in generator. */
3048     if (s->prefix & PREFIX_LOCK) {
3049         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3050     } else {
3051         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3052     }
3053 
3054     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
3055 
3056     /* Determine success after the fact. */
3057     t0 = tcg_temp_new_i64();
3058     t1 = tcg_temp_new_i64();
3059     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3060     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3061     tcg_gen_or_i64(t0, t0, t1);
3062 
3063     /* Update Z. */
3064     gen_compute_eflags(s);
3065     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3066     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
3067 
3068     /*
3069      * Extract the result values for the register pair.  We may do this
3070      * unconditionally, because on success (Z=1), the old value matches
3071      * the previous value in RDX:RAX.
3072      */
3073     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3074     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
3075 }
3076 #endif
3077 
3078 /* Convert one instruction.  s->base.is_jmp is set if the translation must
3079    be stopped.  Return false only if the insn must be retried in a new TB. */
3080 static bool disas_insn(DisasContext *s, CPUState *cpu)
3081 {
3082     CPUX86State *env = cpu_env(cpu);
3083     int b, prefixes;
3084     int shift;
3085     MemOp ot, aflag, dflag;
3086     int modrm, reg, rm, mod, op, opreg, val;
3087     bool orig_cc_op_dirty = s->cc_op_dirty;
3088     CCOp orig_cc_op = s->cc_op;
3089     target_ulong orig_pc_save = s->pc_save;
3090 
3091     s->pc = s->base.pc_next;
3092     s->override = -1;
3093 #ifdef TARGET_X86_64
3094     s->rex_r = 0;
3095     s->rex_x = 0;
3096     s->rex_b = 0;
3097 #endif
3098     s->rip_offset = 0; /* for relative ip address */
3099     s->vex_l = 0;
3100     s->vex_v = 0;
3101     s->vex_w = false;
3102     switch (sigsetjmp(s->jmpbuf, 0)) {
3103     case 0:
3104         break;
3105     case 1:
3106         gen_exception_gpf(s);
3107         return true;
3108     case 2:
3109         /* Restore state that may affect the next instruction. */
3110         s->pc = s->base.pc_next;
3111         /*
3112          * TODO: These save/restore can be removed after the table-based
3113          * decoder is complete; we will be decoding the insn completely
3114          * before any code generation that might affect these variables.
3115          */
3116         s->cc_op_dirty = orig_cc_op_dirty;
3117         s->cc_op = orig_cc_op;
3118         s->pc_save = orig_pc_save;
3119         /* END TODO */
3120         s->base.num_insns--;
3121         tcg_remove_ops_after(s->prev_insn_end);
3122         s->base.is_jmp = DISAS_TOO_MANY;
3123         return false;
3124     default:
3125         g_assert_not_reached();
3126     }
3127 
3128     prefixes = 0;
3129 
3130  next_byte:
3131     s->prefix = prefixes;
3132     b = x86_ldub_code(env, s);
3133     /* Collect prefixes.  */
3134     switch (b) {
3135     default:
3136         break;
3137     case 0x0f:
3138         b = x86_ldub_code(env, s) + 0x100;
3139         break;
3140     case 0xf3:
3141         prefixes |= PREFIX_REPZ;
3142         prefixes &= ~PREFIX_REPNZ;
3143         goto next_byte;
3144     case 0xf2:
3145         prefixes |= PREFIX_REPNZ;
3146         prefixes &= ~PREFIX_REPZ;
3147         goto next_byte;
3148     case 0xf0:
3149         prefixes |= PREFIX_LOCK;
3150         goto next_byte;
3151     case 0x2e:
3152         s->override = R_CS;
3153         goto next_byte;
3154     case 0x36:
3155         s->override = R_SS;
3156         goto next_byte;
3157     case 0x3e:
3158         s->override = R_DS;
3159         goto next_byte;
3160     case 0x26:
3161         s->override = R_ES;
3162         goto next_byte;
3163     case 0x64:
3164         s->override = R_FS;
3165         goto next_byte;
3166     case 0x65:
3167         s->override = R_GS;
3168         goto next_byte;
3169     case 0x66:
3170         prefixes |= PREFIX_DATA;
3171         goto next_byte;
3172     case 0x67:
3173         prefixes |= PREFIX_ADR;
3174         goto next_byte;
3175 #ifdef TARGET_X86_64
3176     case 0x40 ... 0x4f:
3177         if (CODE64(s)) {
3178             /* REX prefix */
3179             prefixes |= PREFIX_REX;
3180             s->vex_w = (b >> 3) & 1;
3181             s->rex_r = (b & 0x4) << 1;
3182             s->rex_x = (b & 0x2) << 2;
3183             s->rex_b = (b & 0x1) << 3;
3184             goto next_byte;
3185         }
3186         break;
3187 #endif
3188     case 0xc5: /* 2-byte VEX */
3189     case 0xc4: /* 3-byte VEX */
3190         if (CODE32(s) && !VM86(s)) {
3191             int vex2 = x86_ldub_code(env, s);
3192             s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
3193 
3194             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3195                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3196                    otherwise the instruction is LES or LDS.  */
3197                 break;
3198             }
3199             disas_insn_new(s, cpu, b);
3200             return true;
3201         }
3202         break;
3203     }
3204 
3205     /* Post-process prefixes.  */
3206     if (CODE64(s)) {
3207         /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
3208            data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3209            over 0x66 if both are present.  */
3210         dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3211         /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
3212         aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3213     } else {
3214         /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
3215         if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3216             dflag = MO_32;
3217         } else {
3218             dflag = MO_16;
3219         }
3220         /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
3221         if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3222             aflag = MO_32;
3223         }  else {
3224             aflag = MO_16;
3225         }
3226     }
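
    /*
     * E.g. in 64-bit mode, "66 48 89 d8" is mov %rbx, %rax: the REX.W
     * in 0x48 overrides the 0x66 operand-size prefix, so dflag = MO_64.
     */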
3227 
3228     s->prefix = prefixes;
3229     s->aflag = aflag;
3230     s->dflag = dflag;
3231 
3232     /* now check op code */
3233     switch (b) {
3234         /**************************/
3235         /* arith & logic */
3236     case 0x00 ... 0x05:
3237     case 0x08 ... 0x0d:
3238     case 0x10 ... 0x15:
3239     case 0x18 ... 0x1d:
3240     case 0x20 ... 0x25:
3241     case 0x28 ... 0x2d:
3242     case 0x30 ... 0x35:
3243     case 0x38 ... 0x3d:
3244         {
3245             int f;
3246             op = (b >> 3) & 7;
3247             f = (b >> 1) & 3;
3248 
3249             ot = mo_b_d(b, dflag);
3250 
3251             switch(f) {
3252             case 0: /* OP Ev, Gv */
3253                 modrm = x86_ldub_code(env, s);
3254                 reg = ((modrm >> 3) & 7) | REX_R(s);
3255                 mod = (modrm >> 6) & 3;
3256                 rm = (modrm & 7) | REX_B(s);
3257                 if (mod != 3) {
3258                     gen_lea_modrm(env, s, modrm);
3259                     opreg = OR_TMP0;
3260                 } else if (op == OP_XORL && rm == reg) {
3261                 xor_zero:
3262                     /* xor reg, reg optimisation */
3263                     set_cc_op(s, CC_OP_CLR);
3264                     tcg_gen_movi_tl(s->T0, 0);
3265                     gen_op_mov_reg_v(s, ot, reg, s->T0);
3266                     break;
3267                 } else {
3268                     opreg = rm;
3269                 }
3270                 gen_op_mov_v_reg(s, ot, s->T1, reg);
3271                 gen_op(s, op, ot, opreg);
3272                 break;
3273             case 1: /* OP Gv, Ev */
3274                 modrm = x86_ldub_code(env, s);
3275                 mod = (modrm >> 6) & 3;
3276                 reg = ((modrm >> 3) & 7) | REX_R(s);
3277                 rm = (modrm & 7) | REX_B(s);
3278                 if (mod != 3) {
3279                     gen_lea_modrm(env, s, modrm);
3280                     gen_op_ld_v(s, ot, s->T1, s->A0);
3281                 } else if (op == OP_XORL && rm == reg) {
3282                     goto xor_zero;
3283                 } else {
3284                     gen_op_mov_v_reg(s, ot, s->T1, rm);
3285                 }
3286                 gen_op(s, op, ot, reg);
3287                 break;
3288             case 2: /* OP A, Iv */
3289                 val = insn_get(env, s, ot);
3290                 tcg_gen_movi_tl(s->T1, val);
3291                 gen_op(s, op, ot, OR_EAX);
3292                 break;
3293             }
3294         }
3295         break;
3296 
3297     case 0x82:
3298         if (CODE64(s))
3299             goto illegal_op;
3300         /* fall through */
3301     case 0x80: /* GRP1 */
3302     case 0x81:
3303     case 0x83:
3304         {
3305             ot = mo_b_d(b, dflag);
3306 
3307             modrm = x86_ldub_code(env, s);
3308             mod = (modrm >> 6) & 3;
3309             rm = (modrm & 7) | REX_B(s);
3310             op = (modrm >> 3) & 7;
3311 
3312             if (mod != 3) {
3313                 if (b == 0x83)
3314                     s->rip_offset = 1;
3315                 else
3316                     s->rip_offset = insn_const_size(ot);
3317                 gen_lea_modrm(env, s, modrm);
3318                 opreg = OR_TMP0;
3319             } else {
3320                 opreg = rm;
3321             }
3322 
3323             switch(b) {
3324             default:
3325             case 0x80:
3326             case 0x81:
3327             case 0x82:
3328                 val = insn_get(env, s, ot);
3329                 break;
3330             case 0x83:
3331                 val = (int8_t)insn_get(env, s, MO_8);
3332                 break;
3333             }
3334             tcg_gen_movi_tl(s->T1, val);
3335             gen_op(s, op, ot, opreg);
3336         }
3337         break;
3338 
3339         /**************************/
3340         /* inc, dec, and other misc arith */
3341     case 0x40 ... 0x47: /* inc Gv */
3342         ot = dflag;
3343         gen_inc(s, ot, OR_EAX + (b & 7), 1);
3344         break;
3345     case 0x48 ... 0x4f: /* dec Gv */
3346         ot = dflag;
3347         gen_inc(s, ot, OR_EAX + (b & 7), -1);
3348         break;
3349     case 0xf6: /* GRP3 */
3350     case 0xf7:
3351         ot = mo_b_d(b, dflag);
3352 
3353         modrm = x86_ldub_code(env, s);
3354         mod = (modrm >> 6) & 3;
3355         rm = (modrm & 7) | REX_B(s);
3356         op = (modrm >> 3) & 7;
3357         if (mod != 3) {
3358             if (op == 0) {
3359                 s->rip_offset = insn_const_size(ot);
3360             }
3361             gen_lea_modrm(env, s, modrm);
3362             /* For those below that handle locked memory, don't load here.  */
3363             if (!(s->prefix & PREFIX_LOCK)
3364                 || op != 2) {
3365                 gen_op_ld_v(s, ot, s->T0, s->A0);
3366             }
3367         } else {
3368             gen_op_mov_v_reg(s, ot, s->T0, rm);
3369         }
3370 
3371         switch(op) {
3372         case 0: /* test */
3373             val = insn_get(env, s, ot);
3374             tcg_gen_movi_tl(s->T1, val);
3375             gen_op_testl_T0_T1_cc(s);
3376             set_cc_op(s, CC_OP_LOGICB + ot);
3377             break;
3378         case 2: /* not */
3379             if (s->prefix & PREFIX_LOCK) {
3380                 if (mod == 3) {
3381                     goto illegal_op;
3382                 }
3383                 tcg_gen_movi_tl(s->T0, ~0);
3384                 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3385                                             s->mem_index, ot | MO_LE);
3386             } else {
3387                 tcg_gen_not_tl(s->T0, s->T0);
3388                 if (mod != 3) {
3389                     gen_op_st_v(s, ot, s->T0, s->A0);
3390                 } else {
3391                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3392                 }
3393             }
3394             break;
3395         case 3: /* neg */
3396             if (s->prefix & PREFIX_LOCK) {
3397                 TCGLabel *label1;
3398                 TCGv a0, t0, t1, t2;
3399 
3400                 if (mod == 3) {
3401                     goto illegal_op;
3402                 }
3403                 a0 = s->A0;
3404                 t0 = s->T0;
3405                 label1 = gen_new_label();
3406 
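                     /*
                      * Locked neg is implemented as a compare-and-swap loop:
                      * T0 holds the value loaded above; try to store its
                      * negation, and if another CPU changed memory meanwhile,
                      * cmpxchg returns the new value and the loop retries.
                      */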
3407                 gen_set_label(label1);
3408                 t1 = tcg_temp_new();
3409                 t2 = tcg_temp_new();
3410                 tcg_gen_mov_tl(t2, t0);
3411                 tcg_gen_neg_tl(t1, t0);
3412                 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3413                                           s->mem_index, ot | MO_LE);
3414                 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3415 
3416                 tcg_gen_neg_tl(s->T0, t0);
3417             } else {
3418                 tcg_gen_neg_tl(s->T0, s->T0);
3419                 if (mod != 3) {
3420                     gen_op_st_v(s, ot, s->T0, s->A0);
3421                 } else {
3422                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3423                 }
3424             }
3425             gen_op_update_neg_cc(s);
3426             set_cc_op(s, CC_OP_SUBB + ot);
3427             break;
3428         case 4: /* mul */
3429             switch(ot) {
3430             case MO_8:
3431                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3432                 tcg_gen_ext8u_tl(s->T0, s->T0);
3433                 tcg_gen_ext8u_tl(s->T1, s->T1);
3434                 /* XXX: use 32 bit mul which could be faster */
3435                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3436                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3437                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
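                     /*
                      * CF/OF will be set iff the high byte of the product,
                      * saved in cpu_cc_src below, is nonzero.
                      */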
3438                 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3439                 set_cc_op(s, CC_OP_MULB);
3440                 break;
3441             case MO_16:
3442                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3443                 tcg_gen_ext16u_tl(s->T0, s->T0);
3444                 tcg_gen_ext16u_tl(s->T1, s->T1);
3445                 /* XXX: use 32 bit mul which could be faster */
3446                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3447                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3448                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3449                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3450                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3451                 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3452                 set_cc_op(s, CC_OP_MULW);
3453                 break;
3454             default:
3455             case MO_32:
3456                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3457                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3458                 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3459                                   s->tmp2_i32, s->tmp3_i32);
3460                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3461                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3462                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3463                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3464                 set_cc_op(s, CC_OP_MULL);
3465                 break;
3466 #ifdef TARGET_X86_64
3467             case MO_64:
3468                 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3469                                   s->T0, cpu_regs[R_EAX]);
3470                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3471                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3472                 set_cc_op(s, CC_OP_MULQ);
3473                 break;
3474 #endif
3475             }
3476             break;
3477         case 5: /* imul */
3478             switch(ot) {
3479             case MO_8:
3480                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3481                 tcg_gen_ext8s_tl(s->T0, s->T0);
3482                 tcg_gen_ext8s_tl(s->T1, s->T1);
3483                 /* XXX: use 32 bit mul which could be faster */
3484                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3485                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3486                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
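                     /*
                      * For signed multiply, CF/OF are set iff the product
                      * does not fit in the low byte, i.e. iff it differs
                      * from its 8-bit sign extension computed below.
                      */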
3487                 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3488                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3489                 set_cc_op(s, CC_OP_MULB);
3490                 break;
3491             case MO_16:
3492                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3493                 tcg_gen_ext16s_tl(s->T0, s->T0);
3494                 tcg_gen_ext16s_tl(s->T1, s->T1);
3495                 /* XXX: use 32 bit mul which could be faster */
3496                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3497                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3498                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3499                 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3500                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3501                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3502                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3503                 set_cc_op(s, CC_OP_MULW);
3504                 break;
3505             default:
3506             case MO_32:
3507                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3508                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3509                 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3510                                   s->tmp2_i32, s->tmp3_i32);
3511                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3512                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3513                 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3514                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3515                 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3516                 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3517                 set_cc_op(s, CC_OP_MULL);
3518                 break;
3519 #ifdef TARGET_X86_64
3520             case MO_64:
3521                 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3522                                   s->T0, cpu_regs[R_EAX]);
3523                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3524                 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3525                 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3526                 set_cc_op(s, CC_OP_MULQ);
3527                 break;
3528 #endif
3529             }
3530             break;
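             /*
              * div/idiv are implemented entirely in helpers because they
              * must raise #DE on a zero divisor or a quotient overflow.
              */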
3531         case 6: /* div */
3532             switch(ot) {
3533             case MO_8:
3534                 gen_helper_divb_AL(tcg_env, s->T0);
3535                 break;
3536             case MO_16:
3537                 gen_helper_divw_AX(tcg_env, s->T0);
3538                 break;
3539             default:
3540             case MO_32:
3541                 gen_helper_divl_EAX(tcg_env, s->T0);
3542                 break;
3543 #ifdef TARGET_X86_64
3544             case MO_64:
3545                 gen_helper_divq_EAX(tcg_env, s->T0);
3546                 break;
3547 #endif
3548             }
3549             break;
3550         case 7: /* idiv */
3551             switch(ot) {
3552             case MO_8:
3553                 gen_helper_idivb_AL(tcg_env, s->T0);
3554                 break;
3555             case MO_16:
3556                 gen_helper_idivw_AX(tcg_env, s->T0);
3557                 break;
3558             default:
3559             case MO_32:
3560                 gen_helper_idivl_EAX(tcg_env, s->T0);
3561                 break;
3562 #ifdef TARGET_X86_64
3563             case MO_64:
3564                 gen_helper_idivq_EAX(tcg_env, s->T0);
3565                 break;
3566 #endif
3567             }
3568             break;
3569         default:
3570             goto unknown_op;
3571         }
3572         break;
3573 
3574     case 0xfe: /* GRP4 */
3575     case 0xff: /* GRP5 */
3576         ot = mo_b_d(b, dflag);
3577 
3578         modrm = x86_ldub_code(env, s);
3579         mod = (modrm >> 6) & 3;
3580         rm = (modrm & 7) | REX_B(s);
3581         op = (modrm >> 3) & 7;
3582         if (op >= 2 && b == 0xfe) {
3583             goto unknown_op;
3584         }
3585         if (CODE64(s)) {
3586             if (op == 2 || op == 4) {
3587                 /* operand size for near calls and jumps is fixed at 64 bit */
3588                 ot = MO_64;
3589             } else if (op == 3 || op == 5) {
3590                 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3591             } else if (op == 6) {
3592                 /* default push size is 64 bit */
3593                 ot = mo_pushpop(s, dflag);
3594             }
3595         }
3596         if (mod != 3) {
3597             gen_lea_modrm(env, s, modrm);
3598             if (op >= 2 && op != 3 && op != 5)
3599                 gen_op_ld_v(s, ot, s->T0, s->A0);
3600         } else {
3601             gen_op_mov_v_reg(s, ot, s->T0, rm);
3602         }
3603 
3604         switch(op) {
3605         case 0: /* inc Ev */
3606             if (mod != 3)
3607                 opreg = OR_TMP0;
3608             else
3609                 opreg = rm;
3610             gen_inc(s, ot, opreg, 1);
3611             break;
3612         case 1: /* dec Ev */
3613             if (mod != 3)
3614                 opreg = OR_TMP0;
3615             else
3616                 opreg = rm;
3617             gen_inc(s, ot, opreg, -1);
3618             break;
3619         case 2: /* call Ev */
3620             /* XXX: optimize if memory (no 'and' is necessary) */
3621             if (dflag == MO_16) {
3622                 tcg_gen_ext16u_tl(s->T0, s->T0);
3623             }
3624             gen_push_v(s, eip_next_tl(s));
3625             gen_op_jmp_v(s, s->T0);
3626             gen_bnd_jmp(s);
3627             s->base.is_jmp = DISAS_JUMP;
3628             break;
3629         case 3: /* lcall Ev */
3630             if (mod == 3) {
3631                 goto illegal_op;
3632             }
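                 /* The far pointer in memory is offset (T1) followed by a 16-bit selector (T0). */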
3633             gen_op_ld_v(s, ot, s->T1, s->A0);
3634             gen_add_A0_im(s, 1 << ot);
3635             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3636         do_lcall:
3637             if (PE(s) && !VM86(s)) {
3638                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3639                 gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
3640                                            tcg_constant_i32(dflag - 1),
3641                                            eip_next_tl(s));
3642             } else {
3643                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3644                 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3645                 gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
3646                                       tcg_constant_i32(dflag - 1),
3647                                       eip_next_i32(s));
3648             }
3649             s->base.is_jmp = DISAS_JUMP;
3650             break;
3651         case 4: /* jmp Ev */
3652             if (dflag == MO_16) {
3653                 tcg_gen_ext16u_tl(s->T0, s->T0);
3654             }
3655             gen_op_jmp_v(s, s->T0);
3656             gen_bnd_jmp(s);
3657             s->base.is_jmp = DISAS_JUMP;
3658             break;
3659         case 5: /* ljmp Ev */
3660             if (mod == 3) {
3661                 goto illegal_op;
3662             }
3663             gen_op_ld_v(s, ot, s->T1, s->A0);
3664             gen_add_A0_im(s, 1 << ot);
3665             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3666         do_ljmp:
3667             if (PE(s) && !VM86(s)) {
3668                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3669                 gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
3670                                           eip_next_tl(s));
3671             } else {
3672                 gen_op_movl_seg_T0_vm(s, R_CS);
3673                 gen_op_jmp_v(s, s->T1);
3674             }
3675             s->base.is_jmp = DISAS_JUMP;
3676             break;
3677         case 6: /* push Ev */
3678             gen_push_v(s, s->T0);
3679             break;
3680         default:
3681             goto unknown_op;
3682         }
3683         break;
3684 
3685     case 0x84: /* test Ev, Gv */
3686     case 0x85:
3687         ot = mo_b_d(b, dflag);
3688 
3689         modrm = x86_ldub_code(env, s);
3690         reg = ((modrm >> 3) & 7) | REX_R(s);
3691 
3692         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3693         gen_op_mov_v_reg(s, ot, s->T1, reg);
3694         gen_op_testl_T0_T1_cc(s);
3695         set_cc_op(s, CC_OP_LOGICB + ot);
3696         break;
3697 
3698     case 0xa8: /* test eAX, Iv */
3699     case 0xa9:
3700         ot = mo_b_d(b, dflag);
3701         val = insn_get(env, s, ot);
3702 
3703         gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3704         tcg_gen_movi_tl(s->T1, val);
3705         gen_op_testl_T0_T1_cc(s);
3706         set_cc_op(s, CC_OP_LOGICB + ot);
3707         break;
3708 
3709     case 0x98: /* CWDE/CBW */
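             /* Sign-extend the low half of rAX in place: CBW AL->AX, CWDE AX->EAX, CDQE EAX->RAX. */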
3710         switch (dflag) {
3711 #ifdef TARGET_X86_64
3712         case MO_64:
3713             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3714             tcg_gen_ext32s_tl(s->T0, s->T0);
3715             gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3716             break;
3717 #endif
3718         case MO_32:
3719             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3720             tcg_gen_ext16s_tl(s->T0, s->T0);
3721             gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3722             break;
3723         case MO_16:
3724             gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3725             tcg_gen_ext8s_tl(s->T0, s->T0);
3726             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3727             break;
3728         default:
3729             g_assert_not_reached();
3730         }
3731         break;
3732     case 0x99: /* CDQ/CWD */
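             /* Replicate the sign of rAX across rDX: CWD AX->DX:AX, CDQ EAX->EDX:EAX, CQO RAX->RDX:RAX. */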
3733         switch (dflag) {
3734 #ifdef TARGET_X86_64
3735         case MO_64:
3736             gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3737             tcg_gen_sari_tl(s->T0, s->T0, 63);
3738             gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3739             break;
3740 #endif
3741         case MO_32:
3742             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3743             tcg_gen_ext32s_tl(s->T0, s->T0);
3744             tcg_gen_sari_tl(s->T0, s->T0, 31);
3745             gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3746             break;
3747         case MO_16:
3748             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3749             tcg_gen_ext16s_tl(s->T0, s->T0);
3750             tcg_gen_sari_tl(s->T0, s->T0, 15);
3751             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3752             break;
3753         default:
3754             g_assert_not_reached();
3755         }
3756         break;
3757     case 0x1af: /* imul Gv, Ev */
3758     case 0x69: /* imul Gv, Ev, I */
3759     case 0x6b:
3760         ot = dflag;
3761         modrm = x86_ldub_code(env, s);
3762         reg = ((modrm >> 3) & 7) | REX_R(s);
3763         if (b == 0x69)
3764             s->rip_offset = insn_const_size(ot);
3765         else if (b == 0x6b)
3766             s->rip_offset = 1;
3767         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3768         if (b == 0x69) {
3769             val = insn_get(env, s, ot);
3770             tcg_gen_movi_tl(s->T1, val);
3771         } else if (b == 0x6b) {
3772             val = (int8_t)insn_get(env, s, MO_8);
3773             tcg_gen_movi_tl(s->T1, val);
3774         } else {
3775             gen_op_mov_v_reg(s, ot, s->T1, reg);
3776         }
3777         switch (ot) {
3778 #ifdef TARGET_X86_64
3779         case MO_64:
3780             tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3781             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3782             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3783             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3784             break;
3785 #endif
3786         case MO_32:
3787             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3788             tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3789             tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3790                               s->tmp2_i32, s->tmp3_i32);
3791             tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3792             tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3793             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3794             tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3795             tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3796             break;
3797         default:
3798             tcg_gen_ext16s_tl(s->T0, s->T0);
3799             tcg_gen_ext16s_tl(s->T1, s->T1);
3800             /* XXX: use 32 bit mul which could be faster */
3801             tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3802             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3803             tcg_gen_ext16s_tl(s->tmp0, s->T0);
3804             tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3805             gen_op_mov_reg_v(s, ot, reg, s->T0);
3806             break;
3807         }
3808         set_cc_op(s, CC_OP_MULB + ot);
3809         break;
3810     case 0x1c0:
3811     case 0x1c1: /* xadd Ev, Gv */
3812         ot = mo_b_d(b, dflag);
3813         modrm = x86_ldub_code(env, s);
3814         reg = ((modrm >> 3) & 7) | REX_R(s);
3815         mod = (modrm >> 6) & 3;
3816         gen_op_mov_v_reg(s, ot, s->T0, reg);
3817         if (mod == 3) {
3818             rm = (modrm & 7) | REX_B(s);
3819             gen_op_mov_v_reg(s, ot, s->T1, rm);
3820             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3821             gen_op_mov_reg_v(s, ot, reg, s->T1);
3822             gen_op_mov_reg_v(s, ot, rm, s->T0);
3823         } else {
3824             gen_lea_modrm(env, s, modrm);
3825             if (s->prefix & PREFIX_LOCK) {
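                     /*
                      * Atomically add the register into memory; T1 receives
                      * the old memory value, which is written to the source
                      * register below.
                      */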
3826                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3827                                             s->mem_index, ot | MO_LE);
3828                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3829             } else {
3830                 gen_op_ld_v(s, ot, s->T1, s->A0);
3831                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3832                 gen_op_st_v(s, ot, s->T0, s->A0);
3833             }
3834             gen_op_mov_reg_v(s, ot, reg, s->T1);
3835         }
3836         gen_op_update2_cc(s);
3837         set_cc_op(s, CC_OP_ADDB + ot);
3838         break;
3839     case 0x1b0:
3840     case 0x1b1: /* cmpxchg Ev, Gv */
3841         {
3842             TCGv oldv, newv, cmpv, dest;
3843 
3844             ot = mo_b_d(b, dflag);
3845             modrm = x86_ldub_code(env, s);
3846             reg = ((modrm >> 3) & 7) | REX_R(s);
3847             mod = (modrm >> 6) & 3;
3848             oldv = tcg_temp_new();
3849             newv = tcg_temp_new();
3850             cmpv = tcg_temp_new();
3851             gen_op_mov_v_reg(s, ot, newv, reg);
3852             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3853             gen_extu(ot, cmpv);
3854             if (s->prefix & PREFIX_LOCK) {
3855                 if (mod == 3) {
3856                     goto illegal_op;
3857                 }
3858                 gen_lea_modrm(env, s, modrm);
3859                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3860                                           s->mem_index, ot | MO_LE);
3861             } else {
3862                 if (mod == 3) {
3863                     rm = (modrm & 7) | REX_B(s);
3864                     gen_op_mov_v_reg(s, ot, oldv, rm);
3865                     gen_extu(ot, oldv);
3866 
3867                     /*
3868                      * Unlike the memory case, where "the destination operand receives
3869                      * a write cycle without regard to the result of the comparison",
3870                      * rm must not be touched at all if the comparison fails, not
3871                      * even to zero-extend it on 64-bit processors.  So, precompute
3872                      * the result of a successful writeback and perform the movcond
3873                      * directly on cpu_regs.  The accumulator must also be written
3874                      * first, in case rm is part of RAX too.
3875                      */
3876                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3877                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3878                 } else {
3879                     gen_lea_modrm(env, s, modrm);
3880                     gen_op_ld_v(s, ot, oldv, s->A0);
3881 
3882                     /*
3883                      * Perform an unconditional store cycle like a physical CPU;
3884                      * it must happen before changing the accumulator, to ensure
3885                      * idempotency if the store faults and the instruction
3886                      * is restarted.
3887                      */
3888                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3889                     gen_op_st_v(s, ot, newv, s->A0);
3890                 }
3891             }
3892             /*
3893              * Write EAX only if the cmpxchg fails; reuse newv as the
3894              * destination, since it's dead here.
3895              */
3896             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3897             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3898             tcg_gen_mov_tl(cpu_cc_src, oldv);
3899             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3900             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3901             set_cc_op(s, CC_OP_SUBB + ot);
3902         }
3903         break;
3904     case 0x1c7: /* cmpxchg8b */
3905         modrm = x86_ldub_code(env, s);
3906         mod = (modrm >> 6) & 3;
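             /*
              * Group 9: the reg field selects between CMPXCHG8B/16B (memory
              * only) and the register-only RDRAND/RDSEED/RDPID encodings.
              */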
3907         switch ((modrm >> 3) & 7) {
3908         case 1: /* CMPXCHG8, CMPXCHG16 */
3909             if (mod == 3) {
3910                 goto illegal_op;
3911             }
3912 #ifdef TARGET_X86_64
3913             if (dflag == MO_64) {
3914                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3915                     goto illegal_op;
3916                 }
3917                 gen_cmpxchg16b(s, env, modrm);
3918                 break;
3919             }
3920 #endif
3921             if (!(s->cpuid_features & CPUID_CX8)) {
3922                 goto illegal_op;
3923             }
3924             gen_cmpxchg8b(s, env, modrm);
3925             break;
3926 
3927         case 7: /* RDSEED, RDPID with f3 prefix */
3928             if (mod != 3 ||
3929                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3930                 goto illegal_op;
3931             }
3932             if (s->prefix & PREFIX_REPZ) {
3933                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3934                     goto illegal_op;
3935                 }
3936                 gen_helper_rdpid(s->T0, tcg_env);
3937                 rm = (modrm & 7) | REX_B(s);
3938                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3939                 break;
3940             } else {
3941                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3942                     goto illegal_op;
3943                 }
3944                 goto do_rdrand;
3945             }
3946 
3947         case 6: /* RDRAND */
3948             if (mod != 3 ||
3949                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3950                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3951                 goto illegal_op;
3952             }
3953         do_rdrand:
3954             translator_io_start(&s->base);
3955             gen_helper_rdrand(s->T0, tcg_env);
3956             rm = (modrm & 7) | REX_B(s);
3957             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3958             set_cc_op(s, CC_OP_EFLAGS);
3959             break;
3960 
3961         default:
3962             goto illegal_op;
3963         }
3964         break;
3965 
3966         /**************************/
3967         /* push/pop */
3968     case 0x50 ... 0x57: /* push */
3969         gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3970         gen_push_v(s, s->T0);
3971         break;
3972     case 0x58 ... 0x5f: /* pop */
3973         ot = gen_pop_T0(s);
3974         /* NOTE: order is important for pop %sp */
3975         gen_pop_update(s, ot);
3976         gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3977         break;
3978     case 0x60: /* pusha */
3979         if (CODE64(s))
3980             goto illegal_op;
3981         gen_pusha(s);
3982         break;
3983     case 0x61: /* popa */
3984         if (CODE64(s))
3985             goto illegal_op;
3986         gen_popa(s);
3987         break;
3988     case 0x68: /* push Iv */
3989     case 0x6a:
3990         ot = mo_pushpop(s, dflag);
3991         if (b == 0x68)
3992             val = insn_get(env, s, ot);
3993         else
3994             val = (int8_t)insn_get(env, s, MO_8);
3995         tcg_gen_movi_tl(s->T0, val);
3996         gen_push_v(s, s->T0);
3997         break;
3998     case 0x8f: /* pop Ev */
3999         modrm = x86_ldub_code(env, s);
4000         mod = (modrm >> 6) & 3;
4001         ot = gen_pop_T0(s);
4002         if (mod == 3) {
4003             /* NOTE: order is important for pop %sp */
4004             gen_pop_update(s, ot);
4005             rm = (modrm & 7) | REX_B(s);
4006             gen_op_mov_reg_v(s, ot, rm, s->T0);
4007         } else {
4008             /* NOTE: order is important too for MMU exceptions */
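                 /*
                  * popl_esp_hack biases ESP-based effective addresses so
                  * that the store sees the stack pointer as already
                  * incremented by the pop, matching hardware behavior.
                  */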
4009             s->popl_esp_hack = 1 << ot;
4010             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4011             s->popl_esp_hack = 0;
4012             gen_pop_update(s, ot);
4013         }
4014         break;
4015     case 0xc8: /* enter */
4016         {
4017             int level;
4018             val = x86_lduw_code(env, s);
4019             level = x86_ldub_code(env, s);
4020             gen_enter(s, val, level);
4021         }
4022         break;
4023     case 0xc9: /* leave */
4024         gen_leave(s);
4025         break;
4026     case 0x06: /* push es */
4027     case 0x0e: /* push cs */
4028     case 0x16: /* push ss */
4029     case 0x1e: /* push ds */
4030         if (CODE64(s))
4031             goto illegal_op;
4032         gen_op_movl_T0_seg(s, b >> 3);
4033         gen_push_v(s, s->T0);
4034         break;
4035     case 0x1a0: /* push fs */
4036     case 0x1a8: /* push gs */
4037         gen_op_movl_T0_seg(s, (b >> 3) & 7);
4038         gen_push_v(s, s->T0);
4039         break;
4040     case 0x07: /* pop es */
4041     case 0x17: /* pop ss */
4042     case 0x1f: /* pop ds */
4043         if (CODE64(s))
4044             goto illegal_op;
4045         reg = b >> 3;
4046         ot = gen_pop_T0(s);
4047         gen_movl_seg_T0(s, reg);
4048         gen_pop_update(s, ot);
4049         break;
4050     case 0x1a1: /* pop fs */
4051     case 0x1a9: /* pop gs */
4052         ot = gen_pop_T0(s);
4053         gen_movl_seg_T0(s, (b >> 3) & 7);
4054         gen_pop_update(s, ot);
4055         break;
4056 
4057         /**************************/
4058         /* mov */
4059     case 0x88:
4060     case 0x89: /* mov Gv, Ev */
4061         ot = mo_b_d(b, dflag);
4062         modrm = x86_ldub_code(env, s);
4063         reg = ((modrm >> 3) & 7) | REX_R(s);
4064 
4065         /* generate a generic store */
4066         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4067         break;
4068     case 0xc6:
4069     case 0xc7: /* mov Ev, Iv */
4070         ot = mo_b_d(b, dflag);
4071         modrm = x86_ldub_code(env, s);
4072         mod = (modrm >> 6) & 3;
4073         if (mod != 3) {
4074             s->rip_offset = insn_const_size(ot);
4075             gen_lea_modrm(env, s, modrm);
4076         }
4077         val = insn_get(env, s, ot);
4078         tcg_gen_movi_tl(s->T0, val);
4079         if (mod != 3) {
4080             gen_op_st_v(s, ot, s->T0, s->A0);
4081         } else {
4082             gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4083         }
4084         break;
4085     case 0x8a:
4086     case 0x8b: /* mov Ev, Gv */
4087         ot = mo_b_d(b, dflag);
4088         modrm = x86_ldub_code(env, s);
4089         reg = ((modrm >> 3) & 7) | REX_R(s);
4090 
4091         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4092         gen_op_mov_reg_v(s, ot, reg, s->T0);
4093         break;
4094     case 0x8e: /* mov seg, Gv */
4095         modrm = x86_ldub_code(env, s);
4096         reg = (modrm >> 3) & 7;
4097         if (reg >= 6 || reg == R_CS)
4098             goto illegal_op;
4099         gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4100         gen_movl_seg_T0(s, reg);
4101         break;
4102     case 0x8c: /* mov Gv, seg */
4103         modrm = x86_ldub_code(env, s);
4104         reg = (modrm >> 3) & 7;
4105         mod = (modrm >> 6) & 3;
4106         if (reg >= 6)
4107             goto illegal_op;
4108         gen_op_movl_T0_seg(s, reg);
4109         ot = mod == 3 ? dflag : MO_16;
4110         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4111         break;
4112 
4113     case 0x1b6: /* movzbS Gv, Eb */
4114     case 0x1b7: /* movzwS Gv, Eb */
4115     case 0x1be: /* movsbS Gv, Eb */
4116     case 0x1bf: /* movswS Gv, Eb */
4117         {
4118             MemOp d_ot;
4119             MemOp s_ot;
4120 
4121             /* d_ot is the size of destination */
4122             d_ot = dflag;
4123             /* ot is the size of source */
4124             ot = (b & 1) + MO_8;
4125             /* s_ot is the sign+size of source */
4126             s_ot = b & 8 ? MO_SIGN | ot : ot;
4127 
4128             modrm = x86_ldub_code(env, s);
4129             reg = ((modrm >> 3) & 7) | REX_R(s);
4130             mod = (modrm >> 6) & 3;
4131             rm = (modrm & 7) | REX_B(s);
4132 
4133             if (mod == 3) {
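                     /*
                      * AH/CH/DH/BH live in bits 8..15 of EAX/ECX/EDX/EBX,
                      * hence rm - 4 and the extract at bit 8.
                      */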
4134                 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4135                     tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4136                 } else {
4137                     gen_op_mov_v_reg(s, ot, s->T0, rm);
4138                     switch (s_ot) {
4139                     case MO_UB:
4140                         tcg_gen_ext8u_tl(s->T0, s->T0);
4141                         break;
4142                     case MO_SB:
4143                         tcg_gen_ext8s_tl(s->T0, s->T0);
4144                         break;
4145                     case MO_UW:
4146                         tcg_gen_ext16u_tl(s->T0, s->T0);
4147                         break;
4148                     default:
4149                     case MO_SW:
4150                         tcg_gen_ext16s_tl(s->T0, s->T0);
4151                         break;
4152                     }
4153                 }
4154                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4155             } else {
4156                 gen_lea_modrm(env, s, modrm);
4157                 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4158                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4159             }
4160         }
4161         break;
4162 
4163     case 0x8d: /* lea */
4164         modrm = x86_ldub_code(env, s);
4165         mod = (modrm >> 6) & 3;
4166         if (mod == 3)
4167             goto illegal_op;
4168         reg = ((modrm >> 3) & 7) | REX_R(s);
4169         {
4170             AddressParts a = gen_lea_modrm_0(env, s, modrm);
4171             TCGv ea = gen_lea_modrm_1(s, a, false);
4172             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4173             gen_op_mov_reg_v(s, dflag, reg, s->A0);
4174         }
4175         break;
4176 
4177     case 0xa0: /* mov EAX, Ov */
4178     case 0xa1:
4179     case 0xa2: /* mov Ov, EAX */
4180     case 0xa3:
4181         {
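                 /* These encodings take a direct address-sized offset (moffs), not a modrm byte. */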
4182             target_ulong offset_addr;
4183 
4184             ot = mo_b_d(b, dflag);
4185             offset_addr = insn_get_addr(env, s, s->aflag);
4186             tcg_gen_movi_tl(s->A0, offset_addr);
4187             gen_add_A0_ds_seg(s);
4188             if ((b & 2) == 0) {
4189                 gen_op_ld_v(s, ot, s->T0, s->A0);
4190                 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4191             } else {
4192                 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4193                 gen_op_st_v(s, ot, s->T0, s->A0);
4194             }
4195         }
4196         break;
4197     case 0xd7: /* xlat */
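             /* AL = byte at DS:[rBX + unsigned AL], truncated to the address size. */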
4198         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4199         tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4200         tcg_gen_add_tl(s->A0, s->A0, s->T0);
4201         gen_extu(s->aflag, s->A0);
4202         gen_add_A0_ds_seg(s);
4203         gen_op_ld_v(s, MO_8, s->T0, s->A0);
4204         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4205         break;
4206     case 0xb0 ... 0xb7: /* mov R, Ib */
4207         val = insn_get(env, s, MO_8);
4208         tcg_gen_movi_tl(s->T0, val);
4209         gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4210         break;
4211     case 0xb8 ... 0xbf: /* mov R, Iv */
4212 #ifdef TARGET_X86_64
4213         if (dflag == MO_64) {
4214             uint64_t tmp;
4215             /* 64 bit case */
4216             tmp = x86_ldq_code(env, s);
4217             reg = (b & 7) | REX_B(s);
4218             tcg_gen_movi_tl(s->T0, tmp);
4219             gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4220         } else
4221 #endif
4222         {
4223             ot = dflag;
4224             val = insn_get(env, s, ot);
4225             reg = (b & 7) | REX_B(s);
4226             tcg_gen_movi_tl(s->T0, val);
4227             gen_op_mov_reg_v(s, ot, reg, s->T0);
4228         }
4229         break;
4230 
4231     case 0x91 ... 0x97: /* xchg R, EAX */
4232     do_xchg_reg_eax:
4233         ot = dflag;
4234         reg = (b & 7) | REX_B(s);
4235         rm = R_EAX;
4236         goto do_xchg_reg;
4237     case 0x86:
4238     case 0x87: /* xchg Ev, Gv */
4239         ot = mo_b_d(b, dflag);
4240         modrm = x86_ldub_code(env, s);
4241         reg = ((modrm >> 3) & 7) | REX_R(s);
4242         mod = (modrm >> 6) & 3;
4243         if (mod == 3) {
4244             rm = (modrm & 7) | REX_B(s);
4245         do_xchg_reg:
4246             gen_op_mov_v_reg(s, ot, s->T0, reg);
4247             gen_op_mov_v_reg(s, ot, s->T1, rm);
4248             gen_op_mov_reg_v(s, ot, rm, s->T0);
4249             gen_op_mov_reg_v(s, ot, reg, s->T1);
4250         } else {
4251             gen_lea_modrm(env, s, modrm);
4252             gen_op_mov_v_reg(s, ot, s->T0, reg);
4253             /* for xchg, lock is implicit */
4254             tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4255                                    s->mem_index, ot | MO_LE);
4256             gen_op_mov_reg_v(s, ot, reg, s->T1);
4257         }
4258         break;
4259     case 0xc4: /* les Gv */
4260         /* In CODE64 this is VEX3; see above.  */
4261         op = R_ES;
4262         goto do_lxx;
4263     case 0xc5: /* lds Gv */
4264         /* In CODE64 this is VEX2; see above.  */
4265         op = R_DS;
4266         goto do_lxx;
4267     case 0x1b2: /* lss Gv */
4268         op = R_SS;
4269         goto do_lxx;
4270     case 0x1b4: /* lfs Gv */
4271         op = R_FS;
4272         goto do_lxx;
4273     case 0x1b5: /* lgs Gv */
4274         op = R_GS;
4275     do_lxx:
4276         ot = dflag != MO_16 ? MO_32 : MO_16;
4277         modrm = x86_ldub_code(env, s);
4278         reg = ((modrm >> 3) & 7) | REX_R(s);
4279         mod = (modrm >> 6) & 3;
4280         if (mod == 3)
4281             goto illegal_op;
4282         gen_lea_modrm(env, s, modrm);
4283         gen_op_ld_v(s, ot, s->T1, s->A0);
4284         gen_add_A0_im(s, 1 << ot);
4285         /* load the segment first to handle exceptions properly */
4286         gen_op_ld_v(s, MO_16, s->T0, s->A0);
4287         gen_movl_seg_T0(s, op);
4288         /* then put the data */
4289         gen_op_mov_reg_v(s, ot, reg, s->T1);
4290         break;
4291 
4292         /************************/
4293         /* shifts */
4294     case 0xc0:
4295     case 0xc1:
4296         /* shift Ev,Ib */
4297         shift = 2;
4298     grp2:
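             /* shift selects the count operand: 0 = CL, 1 = constant 1, 2 = imm8 read below. */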
4299         {
4300             ot = mo_b_d(b, dflag);
4301             modrm = x86_ldub_code(env, s);
4302             mod = (modrm >> 6) & 3;
4303             op = (modrm >> 3) & 7;
4304 
4305             if (mod != 3) {
4306                 if (shift == 2) {
4307                     s->rip_offset = 1;
4308                 }
4309                 gen_lea_modrm(env, s, modrm);
4310                 opreg = OR_TMP0;
4311             } else {
4312                 opreg = (modrm & 7) | REX_B(s);
4313             }
4314 
4315             /* variable count in CL vs. constant or immediate count */
4316             if (shift == 0) {
4317                 gen_shift(s, op, ot, opreg, OR_ECX);
4318             } else {
4319                 if (shift == 2) {
4320                     shift = x86_ldub_code(env, s);
4321                 }
4322                 gen_shifti(s, op, ot, opreg, shift);
4323             }
4324         }
4325         break;
4326     case 0xd0:
4327     case 0xd1:
4328         /* shift Ev,1 */
4329         shift = 1;
4330         goto grp2;
4331     case 0xd2:
4332     case 0xd3:
4333         /* shift Ev,cl */
4334         shift = 0;
4335         goto grp2;
4336 
4337     case 0x1a4: /* shld imm */
4338         op = 0;
4339         shift = 1;
4340         goto do_shiftd;
4341     case 0x1a5: /* shld cl */
4342         op = 0;
4343         shift = 0;
4344         goto do_shiftd;
4345     case 0x1ac: /* shrd imm */
4346         op = 1;
4347         shift = 1;
4348         goto do_shiftd;
4349     case 0x1ad: /* shrd cl */
4350         op = 1;
4351         shift = 0;
4352     do_shiftd:
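             /* op selects the direction (0 = shld, 1 = shrd); shift selects imm8 (1) vs CL (0) count. */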
4353         ot = dflag;
4354         modrm = x86_ldub_code(env, s);
4355         mod = (modrm >> 6) & 3;
4356         rm = (modrm & 7) | REX_B(s);
4357         reg = ((modrm >> 3) & 7) | REX_R(s);
4358         if (mod != 3) {
4359             gen_lea_modrm(env, s, modrm);
4360             opreg = OR_TMP0;
4361         } else {
4362             opreg = rm;
4363         }
4364         gen_op_mov_v_reg(s, ot, s->T1, reg);
4365 
4366         if (shift) {
4367             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4368             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4369         } else {
4370             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4371         }
4372         break;
4373 
4374         /************************/
4375         /* floats */
4376     case 0xd8 ... 0xdf:
4377         {
4378             bool update_fip = true;
4379 
4380             if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4381                 /* If CR0.EM or CR0.TS is set, generate an FPU exception. */
4382                 /* XXX: what to do if illegal op ? */
4383                 gen_exception(s, EXCP07_PREX);
4384                 break;
4385             }
4386             modrm = x86_ldub_code(env, s);
4387             mod = (modrm >> 6) & 3;
4388             rm = modrm & 7;
4389             op = ((b & 7) << 3) | ((modrm >> 3) & 7);
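                 /*
                  * Build a 6-bit index from the low three opcode bits
                  * (0xd8-0xdf) and the reg field of the modrm byte; the
                  * switches below decode on this combined value.
                  */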
4390             if (mod != 3) {
4391                 /* memory op */
4392                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4393                 TCGv ea = gen_lea_modrm_1(s, a, false);
4394                 TCGv last_addr = tcg_temp_new();
4395                 bool update_fdp = true;
4396 
4397                 tcg_gen_mov_tl(last_addr, ea);
4398                 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4399 
4400                 switch (op) {
4401                 case 0x00 ... 0x07: /* fxxxs */
4402                 case 0x10 ... 0x17: /* fixxxl */
4403                 case 0x20 ... 0x27: /* fxxxl */
4404                 case 0x30 ... 0x37: /* fixxx */
4405                     {
4406                         int op1;
4407                         op1 = op & 7;
4408 
4409                         switch (op >> 4) {
4410                         case 0:
4411                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4412                                                 s->mem_index, MO_LEUL);
4413                             gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
4414                             break;
4415                         case 1:
4416                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4417                                                 s->mem_index, MO_LEUL);
4418                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4419                             break;
4420                         case 2:
4421                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4422                                                 s->mem_index, MO_LEUQ);
4423                             gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
4424                             break;
4425                         case 3:
4426                         default:
4427                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4428                                                 s->mem_index, MO_LESW);
4429                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4430                             break;
4431                         }
4432 
4433                         gen_helper_fp_arith_ST0_FT0(op1);
4434                         if (op1 == 3) {
4435                             /* fcomp needs pop */
4436                             gen_helper_fpop(tcg_env);
4437                         }
4438                     }
4439                     break;
4440                 case 0x08: /* flds */
4441                 case 0x0a: /* fsts */
4442                 case 0x0b: /* fstps */
4443                 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4444                 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4445                 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4446                     switch (op & 7) {
4447                     case 0:
4448                         switch (op >> 4) {
4449                         case 0:
4450                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4451                                                 s->mem_index, MO_LEUL);
4452                             gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
4453                             break;
4454                         case 1:
4455                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4456                                                 s->mem_index, MO_LEUL);
4457                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4458                             break;
4459                         case 2:
4460                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4461                                                 s->mem_index, MO_LEUQ);
4462                             gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
4463                             break;
4464                         case 3:
4465                         default:
4466                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4467                                                 s->mem_index, MO_LESW);
4468                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4469                             break;
4470                         }
4471                         break;
4472                     case 1:
4473                         /* XXX: the corresponding CPUID bit must be tested ! */
4474                         switch (op >> 4) {
4475                         case 1:
4476                             gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
4477                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4478                                                 s->mem_index, MO_LEUL);
4479                             break;
4480                         case 2:
4481                             gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
4482                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4483                                                 s->mem_index, MO_LEUQ);
4484                             break;
4485                         case 3:
4486                         default:
4487                             gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
4488                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4489                                                 s->mem_index, MO_LEUW);
4490                             break;
4491                         }
4492                         gen_helper_fpop(tcg_env);
4493                         break;
4494                     default:
4495                         switch (op >> 4) {
4496                         case 0:
4497                             gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
4498                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4499                                                 s->mem_index, MO_LEUL);
4500                             break;
4501                         case 1:
4502                             gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
4503                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4504                                                 s->mem_index, MO_LEUL);
4505                             break;
4506                         case 2:
4507                             gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
4508                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4509                                                 s->mem_index, MO_LEUQ);
4510                             break;
4511                         case 3:
4512                         default:
4513                             gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
4514                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4515                                                 s->mem_index, MO_LEUW);
4516                             break;
4517                         }
4518                         if ((op & 7) == 3) {
4519                             gen_helper_fpop(tcg_env);
4520                         }
4521                         break;
4522                     }
4523                     break;
4524                 case 0x0c: /* fldenv mem */
4525                     gen_helper_fldenv(tcg_env, s->A0,
4526                                       tcg_constant_i32(dflag - 1));
4527                     update_fip = update_fdp = false;
4528                     break;
4529                 case 0x0d: /* fldcw mem */
4530                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4531                                         s->mem_index, MO_LEUW);
4532                     gen_helper_fldcw(tcg_env, s->tmp2_i32);
4533                     update_fip = update_fdp = false;
4534                     break;
4535                 case 0x0e: /* fnstenv mem */
4536                     gen_helper_fstenv(tcg_env, s->A0,
4537                                       tcg_constant_i32(dflag - 1));
4538                     update_fip = update_fdp = false;
4539                     break;
4540                 case 0x0f: /* fnstcw mem */
4541                     gen_helper_fnstcw(s->tmp2_i32, tcg_env);
4542                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4543                                         s->mem_index, MO_LEUW);
4544                     update_fip = update_fdp = false;
4545                     break;
4546                 case 0x1d: /* fldt mem */
4547                     gen_helper_fldt_ST0(tcg_env, s->A0);
4548                     break;
4549                 case 0x1f: /* fstpt mem */
4550                     gen_helper_fstt_ST0(tcg_env, s->A0);
4551                     gen_helper_fpop(tcg_env);
4552                     break;
4553                 case 0x2c: /* frstor mem */
4554                     gen_helper_frstor(tcg_env, s->A0,
4555                                       tcg_constant_i32(dflag - 1));
4556                     update_fip = update_fdp = false;
4557                     break;
4558                 case 0x2e: /* fnsave mem */
4559                     gen_helper_fsave(tcg_env, s->A0,
4560                                      tcg_constant_i32(dflag - 1));
4561                     update_fip = update_fdp = false;
4562                     break;
4563                 case 0x2f: /* fnstsw mem */
4564                     gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4565                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4566                                         s->mem_index, MO_LEUW);
4567                     update_fip = update_fdp = false;
4568                     break;
4569                 case 0x3c: /* fbld */
4570                     gen_helper_fbld_ST0(tcg_env, s->A0);
4571                     break;
4572                 case 0x3e: /* fbstp */
4573                     gen_helper_fbst_ST0(tcg_env, s->A0);
4574                     gen_helper_fpop(tcg_env);
4575                     break;
4576                 case 0x3d: /* fildll */
4577                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4578                                         s->mem_index, MO_LEUQ);
4579                     gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
4580                     break;
4581                 case 0x3f: /* fistpll */
4582                     gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
4583                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4584                                         s->mem_index, MO_LEUQ);
4585                     gen_helper_fpop(tcg_env);
4586                     break;
4587                 default:
4588                     goto unknown_op;
4589                 }
4590 
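                     /*
                      * Record the segment selector and offset of the last
                      * memory operand in the x87 FDS/FDP registers.
                      */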
4591                 if (update_fdp) {
4592                     int last_seg = s->override >= 0 ? s->override : a.def_seg;
4593 
4594                     tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4595                                    offsetof(CPUX86State,
4596                                             segs[last_seg].selector));
4597                     tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4598                                      offsetof(CPUX86State, fpds));
4599                     tcg_gen_st_tl(last_addr, tcg_env,
4600                                   offsetof(CPUX86State, fpdp));
4601                 }
4602             } else {
4603                 /* register float ops */
4604                 opreg = rm;
4605 
4606                 switch (op) {
4607                 case 0x08: /* fld sti */
4608                     gen_helper_fpush(tcg_env);
4609                     gen_helper_fmov_ST0_STN(tcg_env,
4610                                             tcg_constant_i32((opreg + 1) & 7));
4611                     break;
4612                 case 0x09: /* fxchg sti */
4613                 case 0x29: /* fxchg4 sti, undocumented op */
4614                 case 0x39: /* fxchg7 sti, undocumented op */
4615                     gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
4616                     break;
4617                 case 0x0a: /* grp d9/2 */
4618                     switch (rm) {
4619                     case 0: /* fnop */
4620                         /*
4621                          * Check exceptions (FreeBSD FPU probe); this needs
4622                          * to be treated as I/O because of ferr_irq.
4623                          */
4624                         translator_io_start(&s->base);
4625                         gen_helper_fwait(tcg_env);
4626                         update_fip = false;
4627                         break;
4628                     default:
4629                         goto unknown_op;
4630                     }
4631                     break;
4632                 case 0x0c: /* grp d9/4 */
4633                     switch (rm) {
4634                     case 0: /* fchs */
4635                         gen_helper_fchs_ST0(tcg_env);
4636                         break;
4637                     case 1: /* fabs */
4638                         gen_helper_fabs_ST0(tcg_env);
4639                         break;
4640                     case 4: /* ftst */
4641                         gen_helper_fldz_FT0(tcg_env);
4642                         gen_helper_fcom_ST0_FT0(tcg_env);
4643                         break;
4644                     case 5: /* fxam */
4645                         gen_helper_fxam_ST0(tcg_env);
4646                         break;
4647                     default:
4648                         goto unknown_op;
4649                     }
4650                     break;
4651                 case 0x0d: /* grp d9/5 */
4652                     {
4653                         switch (rm) {
4654                         case 0:
4655                             gen_helper_fpush(tcg_env);
4656                             gen_helper_fld1_ST0(tcg_env);
4657                             break;
4658                         case 1:
4659                             gen_helper_fpush(tcg_env);
4660                             gen_helper_fldl2t_ST0(tcg_env);
4661                             break;
4662                         case 2:
4663                             gen_helper_fpush(tcg_env);
4664                             gen_helper_fldl2e_ST0(tcg_env);
4665                             break;
4666                         case 3:
4667                             gen_helper_fpush(tcg_env);
4668                             gen_helper_fldpi_ST0(tcg_env);
4669                             break;
4670                         case 4:
4671                             gen_helper_fpush(tcg_env);
4672                             gen_helper_fldlg2_ST0(tcg_env);
4673                             break;
4674                         case 5:
4675                             gen_helper_fpush(tcg_env);
4676                             gen_helper_fldln2_ST0(tcg_env);
4677                             break;
4678                         case 6:
4679                             gen_helper_fpush(tcg_env);
4680                             gen_helper_fldz_ST0(tcg_env);
4681                             break;
4682                         default:
4683                             goto unknown_op;
4684                         }
4685                     }
4686                     break;
4687                 case 0x0e: /* grp d9/6 */
4688                     switch (rm) {
4689                     case 0: /* f2xm1 */
4690                         gen_helper_f2xm1(tcg_env);
4691                         break;
4692                     case 1: /* fyl2x */
4693                         gen_helper_fyl2x(tcg_env);
4694                         break;
4695                     case 2: /* fptan */
4696                         gen_helper_fptan(tcg_env);
4697                         break;
4698                     case 3: /* fpatan */
4699                         gen_helper_fpatan(tcg_env);
4700                         break;
4701                     case 4: /* fxtract */
4702                         gen_helper_fxtract(tcg_env);
4703                         break;
4704                     case 5: /* fprem1 */
4705                         gen_helper_fprem1(tcg_env);
4706                         break;
4707                     case 6: /* fdecstp */
4708                         gen_helper_fdecstp(tcg_env);
4709                         break;
4710                     default:
4711                     case 7: /* fincstp */
4712                         gen_helper_fincstp(tcg_env);
4713                         break;
4714                     }
4715                     break;
4716                 case 0x0f: /* grp d9/7 */
4717                     switch (rm) {
4718                     case 0: /* fprem */
4719                         gen_helper_fprem(tcg_env);
4720                         break;
4721                     case 1: /* fyl2xp1 */
4722                         gen_helper_fyl2xp1(tcg_env);
4723                         break;
4724                     case 2: /* fsqrt */
4725                         gen_helper_fsqrt(tcg_env);
4726                         break;
4727                     case 3: /* fsincos */
4728                         gen_helper_fsincos(tcg_env);
4729                         break;
                    case 4: /* frndint */
                        gen_helper_frndint(tcg_env);
                        break;
                    case 5: /* fscale */
                        gen_helper_fscale(tcg_env);
                        break;
4736                     case 6: /* fsin */
4737                         gen_helper_fsin(tcg_env);
4738                         break;
4739                     default:
4740                     case 7: /* fcos */
4741                         gen_helper_fcos(tcg_env);
4742                         break;
4743                     }
4744                     break;
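                /*
                 * Register-register FPU arithmetic.  The low three bits
                 * of op select the operation (fadd/fmul/fsub/fsubr/
                 * fdiv/fdivr); ops 0x20..0x27 reverse source and
                 * destination (fxxx sti, st), and 0x30..0x37 also pop
                 * the stack afterwards (fxxxp).
                 */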
4745                 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4746                 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4747                 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4748                     {
4749                         int op1;
4750 
4751                         op1 = op & 7;
4752                         if (op >= 0x20) {
4753                             gen_helper_fp_arith_STN_ST0(op1, opreg);
4754                             if (op >= 0x30) {
4755                                 gen_helper_fpop(tcg_env);
4756                             }
4757                         } else {
4758                             gen_helper_fmov_FT0_STN(tcg_env,
4759                                                     tcg_constant_i32(opreg));
4760                             gen_helper_fp_arith_ST0_FT0(op1);
4761                         }
4762                     }
4763                     break;
4764                 case 0x02: /* fcom */
4765                 case 0x22: /* fcom2, undocumented op */
4766                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4767                     gen_helper_fcom_ST0_FT0(tcg_env);
4768                     break;
4769                 case 0x03: /* fcomp */
4770                 case 0x23: /* fcomp3, undocumented op */
4771                 case 0x32: /* fcomp5, undocumented op */
4772                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4773                     gen_helper_fcom_ST0_FT0(tcg_env);
4774                     gen_helper_fpop(tcg_env);
4775                     break;
4776                 case 0x15: /* da/5 */
4777                     switch (rm) {
4778                     case 1: /* fucompp */
4779                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4780                         gen_helper_fucom_ST0_FT0(tcg_env);
4781                         gen_helper_fpop(tcg_env);
4782                         gen_helper_fpop(tcg_env);
4783                         break;
4784                     default:
4785                         goto unknown_op;
4786                     }
4787                     break;
                case 0x1c: /* db/4 */
4789                     switch (rm) {
4790                     case 0: /* feni (287 only, just do nop here) */
4791                         break;
4792                     case 1: /* fdisi (287 only, just do nop here) */
4793                         break;
4794                     case 2: /* fclex */
4795                         gen_helper_fclex(tcg_env);
4796                         update_fip = false;
4797                         break;
4798                     case 3: /* fninit */
4799                         gen_helper_fninit(tcg_env);
4800                         update_fip = false;
4801                         break;
4802                     case 4: /* fsetpm (287 only, just do nop here) */
4803                         break;
4804                     default:
4805                         goto unknown_op;
4806                     }
4807                     break;
4808                 case 0x1d: /* fucomi */
4809                     if (!(s->cpuid_features & CPUID_CMOV)) {
4810                         goto illegal_op;
4811                     }
4812                     gen_update_cc_op(s);
4813                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4814                     gen_helper_fucomi_ST0_FT0(tcg_env);
4815                     set_cc_op(s, CC_OP_EFLAGS);
4816                     break;
4817                 case 0x1e: /* fcomi */
4818                     if (!(s->cpuid_features & CPUID_CMOV)) {
4819                         goto illegal_op;
4820                     }
4821                     gen_update_cc_op(s);
4822                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4823                     gen_helper_fcomi_ST0_FT0(tcg_env);
4824                     set_cc_op(s, CC_OP_EFLAGS);
4825                     break;
4826                 case 0x28: /* ffree sti */
4827                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4828                     break;
4829                 case 0x2a: /* fst sti */
4830                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4831                     break;
4832                 case 0x2b: /* fstp sti */
4833                 case 0x0b: /* fstp1 sti, undocumented op */
4834                 case 0x3a: /* fstp8 sti, undocumented op */
4835                 case 0x3b: /* fstp9 sti, undocumented op */
4836                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4837                     gen_helper_fpop(tcg_env);
4838                     break;
4839                 case 0x2c: /* fucom st(i) */
4840                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4841                     gen_helper_fucom_ST0_FT0(tcg_env);
4842                     break;
4843                 case 0x2d: /* fucomp st(i) */
4844                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4845                     gen_helper_fucom_ST0_FT0(tcg_env);
4846                     gen_helper_fpop(tcg_env);
4847                     break;
4848                 case 0x33: /* de/3 */
4849                     switch (rm) {
4850                     case 1: /* fcompp */
4851                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4852                         gen_helper_fcom_ST0_FT0(tcg_env);
4853                         gen_helper_fpop(tcg_env);
4854                         gen_helper_fpop(tcg_env);
4855                         break;
4856                     default:
4857                         goto unknown_op;
4858                     }
4859                     break;
4860                 case 0x38: /* ffreep sti, undocumented op */
4861                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4862                     gen_helper_fpop(tcg_env);
4863                     break;
4864                 case 0x3c: /* df/4 */
4865                     switch (rm) {
                    case 0: /* fnstsw ax */
4867                         gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4868                         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4869                         gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4870                         break;
4871                     default:
4872                         goto unknown_op;
4873                     }
4874                     break;
4875                 case 0x3d: /* fucomip */
4876                     if (!(s->cpuid_features & CPUID_CMOV)) {
4877                         goto illegal_op;
4878                     }
4879                     gen_update_cc_op(s);
4880                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4881                     gen_helper_fucomi_ST0_FT0(tcg_env);
4882                     gen_helper_fpop(tcg_env);
4883                     set_cc_op(s, CC_OP_EFLAGS);
4884                     break;
4885                 case 0x3e: /* fcomip */
4886                     if (!(s->cpuid_features & CPUID_CMOV)) {
4887                         goto illegal_op;
4888                     }
4889                     gen_update_cc_op(s);
4890                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4891                     gen_helper_fcomi_ST0_FT0(tcg_env);
4892                     gen_helper_fpop(tcg_env);
4893                     set_cc_op(s, CC_OP_EFLAGS);
4894                     break;
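                /*
                 * FCMOVcc: op & 3 indexes the base condition (B/Z/BE/P)
                 * and opcode bit 3 selects the negated form.  The branch
                 * below tests the inverse condition and skips the
                 * register move when the condition does not hold.
                 */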
4895                 case 0x10 ... 0x13: /* fcmovxx */
4896                 case 0x18 ... 0x1b:
4897                     {
4898                         int op1;
4899                         TCGLabel *l1;
4900                         static const uint8_t fcmov_cc[8] = {
4901                             (JCC_B << 1),
4902                             (JCC_Z << 1),
4903                             (JCC_BE << 1),
4904                             (JCC_P << 1),
4905                         };
4906 
4907                         if (!(s->cpuid_features & CPUID_CMOV)) {
4908                             goto illegal_op;
4909                         }
4910                         op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4911                         l1 = gen_new_label();
4912                         gen_jcc1_noeob(s, op1, l1);
4913                         gen_helper_fmov_ST0_STN(tcg_env,
4914                                                 tcg_constant_i32(opreg));
4915                         gen_set_label(l1);
4916                     }
4917                     break;
4918                 default:
4919                     goto unknown_op;
4920                 }
4921             }
4922 
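            /*
             * Unless update_fip was cleared above (fclex/fninit),
             * record the CS selector and instruction address of this
             * x87 insn in fpcs/fpip; these are the values later
             * reported by fnstenv/fsave/fxsave.
             */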
4923             if (update_fip) {
4924                 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4925                                offsetof(CPUX86State, segs[R_CS].selector));
4926                 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4927                                  offsetof(CPUX86State, fpcs));
4928                 tcg_gen_st_tl(eip_cur_tl(s),
4929                               tcg_env, offsetof(CPUX86State, fpip));
4930             }
4931         }
4932         break;
4933         /************************/
4934         /* string ops */
4935 
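    /*
     * For the string instructions, a REPZ/REPNZ prefix selects the
     * gen_repz_* expansion, which wraps the one-iteration gen_* body
     * in an ECX-controlled loop (and, for SCAS/CMPS, a ZF test).
     */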
4936     case 0xa4: /* movsS */
4937     case 0xa5:
4938         ot = mo_b_d(b, dflag);
4939         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4940             gen_repz_movs(s, ot);
4941         } else {
4942             gen_movs(s, ot);
4943         }
4944         break;
4945 
4946     case 0xaa: /* stosS */
4947     case 0xab:
4948         ot = mo_b_d(b, dflag);
4949         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4950             gen_repz_stos(s, ot);
4951         } else {
4952             gen_stos(s, ot);
4953         }
4954         break;
4955     case 0xac: /* lodsS */
4956     case 0xad:
4957         ot = mo_b_d(b, dflag);
4958         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4959             gen_repz_lods(s, ot);
4960         } else {
4961             gen_lods(s, ot);
4962         }
4963         break;
4964     case 0xae: /* scasS */
4965     case 0xaf:
4966         ot = mo_b_d(b, dflag);
4967         if (prefixes & PREFIX_REPNZ) {
4968             gen_repz_scas(s, ot, 1);
4969         } else if (prefixes & PREFIX_REPZ) {
4970             gen_repz_scas(s, ot, 0);
4971         } else {
4972             gen_scas(s, ot);
4973         }
4974         break;
4975 
4976     case 0xa6: /* cmpsS */
4977     case 0xa7:
4978         ot = mo_b_d(b, dflag);
4979         if (prefixes & PREFIX_REPNZ) {
4980             gen_repz_cmps(s, ot, 1);
4981         } else if (prefixes & PREFIX_REPZ) {
4982             gen_repz_cmps(s, ot, 0);
4983         } else {
4984             gen_cmps(s, ot);
4985         }
4986         break;
4987     case 0x6c: /* insS */
4988     case 0x6d:
4989         ot = mo_b_d32(b, dflag);
4990         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4991         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4992         if (!gen_check_io(s, ot, s->tmp2_i32,
4993                           SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
4994             break;
4995         }
4996         translator_io_start(&s->base);
4997         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4998             gen_repz_ins(s, ot);
4999         } else {
5000             gen_ins(s, ot);
5001         }
5002         break;
5003     case 0x6e: /* outsS */
5004     case 0x6f:
5005         ot = mo_b_d32(b, dflag);
5006         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5007         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5008         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
5009             break;
5010         }
5011         translator_io_start(&s->base);
5012         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5013             gen_repz_outs(s, ot);
5014         } else {
5015             gen_outs(s, ot);
5016         }
5017         break;
5018 
5019         /************************/
5020         /* port I/O */
5021 
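    /*
     * Every port I/O form goes through gen_check_io(), which performs
     * the IOPL/TSS permission-bitmap check and the SVM IOIO intercept
     * before any helper touches the port.
     */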
5022     case 0xe4:
5023     case 0xe5:
5024         ot = mo_b_d32(b, dflag);
5025         val = x86_ldub_code(env, s);
5026         tcg_gen_movi_i32(s->tmp2_i32, val);
5027         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5028             break;
5029         }
5030         translator_io_start(&s->base);
5031         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5032         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5033         gen_bpt_io(s, s->tmp2_i32, ot);
5034         break;
5035     case 0xe6:
5036     case 0xe7:
5037         ot = mo_b_d32(b, dflag);
5038         val = x86_ldub_code(env, s);
5039         tcg_gen_movi_i32(s->tmp2_i32, val);
5040         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5041             break;
5042         }
5043         translator_io_start(&s->base);
5044         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5045         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5046         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5047         gen_bpt_io(s, s->tmp2_i32, ot);
5048         break;
5049     case 0xec:
5050     case 0xed:
5051         ot = mo_b_d32(b, dflag);
5052         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5053         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5054         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5055             break;
5056         }
5057         translator_io_start(&s->base);
5058         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5059         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5060         gen_bpt_io(s, s->tmp2_i32, ot);
5061         break;
5062     case 0xee:
5063     case 0xef:
5064         ot = mo_b_d32(b, dflag);
5065         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5066         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5067         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5068             break;
5069         }
5070         translator_io_start(&s->base);
5071         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5072         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5073         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5074         gen_bpt_io(s, s->tmp2_i32, ot);
5075         break;
5076 
5077         /************************/
5078         /* control */
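    /*
     * Near returns leave CS alone and use DISAS_JUMP; the far
     * transfers below reload CS and use DISAS_EOB_ONLY so that the
     * translation flags are re-evaluated.
     */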
5079     case 0xc2: /* ret im */
5080         val = x86_ldsw_code(env, s);
5081         ot = gen_pop_T0(s);
5082         gen_stack_update(s, val + (1 << ot));
5083         /* Note that gen_pop_T0 uses a zero-extending load.  */
5084         gen_op_jmp_v(s, s->T0);
5085         gen_bnd_jmp(s);
5086         s->base.is_jmp = DISAS_JUMP;
5087         break;
5088     case 0xc3: /* ret */
5089         ot = gen_pop_T0(s);
5090         gen_pop_update(s, ot);
5091         /* Note that gen_pop_T0 uses a zero-extending load.  */
5092         gen_op_jmp_v(s, s->T0);
5093         gen_bnd_jmp(s);
5094         s->base.is_jmp = DISAS_JUMP;
5095         break;
5096     case 0xca: /* lret im */
5097         val = x86_ldsw_code(env, s);
5098     do_lret:
5099         if (PE(s) && !VM86(s)) {
5100             gen_update_cc_op(s);
5101             gen_update_eip_cur(s);
5102             gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5103                                       tcg_constant_i32(val));
5104         } else {
5105             gen_stack_A0(s);
5106             /* pop offset */
5107             gen_op_ld_v(s, dflag, s->T0, s->A0);
            /* NOTE: keeping EIP updated is not a problem even if an
               exception is raised here.  */
5110             gen_op_jmp_v(s, s->T0);
5111             /* pop selector */
5112             gen_add_A0_im(s, 1 << dflag);
5113             gen_op_ld_v(s, dflag, s->T0, s->A0);
5114             gen_op_movl_seg_T0_vm(s, R_CS);
5115             /* add stack offset */
5116             gen_stack_update(s, val + (2 << dflag));
5117         }
5118         s->base.is_jmp = DISAS_EOB_ONLY;
5119         break;
5120     case 0xcb: /* lret */
5121         val = 0;
5122         goto do_lret;
5123     case 0xcf: /* iret */
5124         gen_svm_check_intercept(s, SVM_EXIT_IRET);
5125         if (!PE(s) || VM86(s)) {
5126             /* real mode or vm86 mode */
5127             if (!check_vm86_iopl(s)) {
5128                 break;
5129             }
5130             gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
5131         } else {
5132             gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5133                                       eip_next_i32(s));
5134         }
5135         set_cc_op(s, CC_OP_EFLAGS);
5136         s->base.is_jmp = DISAS_EOB_ONLY;
5137         break;
5138     case 0xe8: /* call im */
5139         {
5140             int diff = (dflag != MO_16
5141                         ? (int32_t)insn_get(env, s, MO_32)
5142                         : (int16_t)insn_get(env, s, MO_16));
5143             gen_push_v(s, eip_next_tl(s));
5144             gen_bnd_jmp(s);
5145             gen_jmp_rel(s, dflag, diff, 0);
5146         }
5147         break;
5148     case 0x9a: /* lcall im */
5149         {
5150             unsigned int selector, offset;
5151 
5152             if (CODE64(s))
5153                 goto illegal_op;
5154             ot = dflag;
5155             offset = insn_get(env, s, ot);
5156             selector = insn_get(env, s, MO_16);
5157 
5158             tcg_gen_movi_tl(s->T0, selector);
5159             tcg_gen_movi_tl(s->T1, offset);
5160         }
5161         goto do_lcall;
5162     case 0xe9: /* jmp im */
5163         {
5164             int diff = (dflag != MO_16
5165                         ? (int32_t)insn_get(env, s, MO_32)
5166                         : (int16_t)insn_get(env, s, MO_16));
5167             gen_bnd_jmp(s);
5168             gen_jmp_rel(s, dflag, diff, 0);
5169         }
5170         break;
5171     case 0xea: /* ljmp im */
5172         {
5173             unsigned int selector, offset;
5174 
5175             if (CODE64(s))
5176                 goto illegal_op;
5177             ot = dflag;
5178             offset = insn_get(env, s, ot);
5179             selector = insn_get(env, s, MO_16);
5180 
5181             tcg_gen_movi_tl(s->T0, selector);
5182             tcg_gen_movi_tl(s->T1, offset);
5183         }
5184         goto do_ljmp;
5185     case 0xeb: /* jmp Jb */
5186         {
5187             int diff = (int8_t)insn_get(env, s, MO_8);
5188             gen_jmp_rel(s, dflag, diff, 0);
5189         }
5190         break;
5191     case 0x70 ... 0x7f: /* jcc Jb */
5192         {
5193             int diff = (int8_t)insn_get(env, s, MO_8);
5194             gen_bnd_jmp(s);
5195             gen_jcc(s, b, diff);
5196         }
5197         break;
5198     case 0x180 ... 0x18f: /* jcc Jv */
5199         {
5200             int diff = (dflag != MO_16
5201                         ? (int32_t)insn_get(env, s, MO_32)
5202                         : (int16_t)insn_get(env, s, MO_16));
5203             gen_bnd_jmp(s);
5204             gen_jcc(s, b, diff);
5205         }
5206         break;
5207 
5208     case 0x190 ... 0x19f: /* setcc Gv */
5209         modrm = x86_ldub_code(env, s);
5210         gen_setcc1(s, b, s->T0);
5211         gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
5212         break;
5213     case 0x140 ... 0x14f: /* cmov Gv, Ev */
5214         if (!(s->cpuid_features & CPUID_CMOV)) {
5215             goto illegal_op;
5216         }
5217         ot = dflag;
5218         modrm = x86_ldub_code(env, s);
5219         reg = ((modrm >> 3) & 7) | REX_R(s);
5220         gen_cmovcc1(env, s, ot, b, modrm, reg);
5221         break;
5222 
5223         /************************/
5224         /* flags */
5225     case 0x9c: /* pushf */
5226         gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
5227         if (check_vm86_iopl(s)) {
5228             gen_update_cc_op(s);
5229             gen_helper_read_eflags(s->T0, tcg_env);
5230             gen_push_v(s, s->T0);
5231         }
5232         break;
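    /*
     * POPF may only update the flags that the current privilege level
     * is allowed to touch: CPL 0 may also change IF and IOPL,
     * CPL <= IOPL may change IF, and a 16-bit operand leaves the
     * upper EFLAGS bits alone.
     */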
5233     case 0x9d: /* popf */
5234         gen_svm_check_intercept(s, SVM_EXIT_POPF);
5235         if (check_vm86_iopl(s)) {
5236             int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5237 
5238             if (CPL(s) == 0) {
5239                 mask |= IF_MASK | IOPL_MASK;
5240             } else if (CPL(s) <= IOPL(s)) {
5241                 mask |= IF_MASK;
5242             }
5243             if (dflag == MO_16) {
5244                 mask &= 0xffff;
5245             }
5246 
5247             ot = gen_pop_T0(s);
5248             gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
5249             gen_pop_update(s, ot);
5250             set_cc_op(s, CC_OP_EFLAGS);
5251             /* abort translation because TF/AC flag may change */
5252             s->base.is_jmp = DISAS_EOB_NEXT;
5253         }
5254         break;
5255     case 0x9e: /* sahf */
5256         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5257             goto illegal_op;
5258         tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
5259         gen_compute_eflags(s);
5260         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5261         tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5262         tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
5263         break;
5264     case 0x9f: /* lahf */
5265         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5266             goto illegal_op;
5267         gen_compute_eflags(s);
5268         /* Note: gen_compute_eflags() only gives the condition codes */
5269         tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
5270         tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
5271         break;
5272     case 0xf5: /* cmc */
5273         gen_compute_eflags(s);
5274         tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5275         break;
5276     case 0xf8: /* clc */
5277         gen_compute_eflags(s);
5278         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5279         break;
5280     case 0xf9: /* stc */
5281         gen_compute_eflags(s);
5282         tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5283         break;
5284     case 0xfc: /* cld */
5285         tcg_gen_movi_i32(s->tmp2_i32, 1);
5286         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5287         break;
5288     case 0xfd: /* std */
5289         tcg_gen_movi_i32(s->tmp2_i32, -1);
5290         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5291         break;
5292 
5293         /************************/
5294         /* bit operations */
5295     case 0x1ba: /* bt/bts/btr/btc Gv, im */
5296         ot = dflag;
5297         modrm = x86_ldub_code(env, s);
5298         op = (modrm >> 3) & 7;
5299         mod = (modrm >> 6) & 3;
5300         rm = (modrm & 7) | REX_B(s);
5301         if (mod != 3) {
5302             s->rip_offset = 1;
5303             gen_lea_modrm(env, s, modrm);
5304             if (!(s->prefix & PREFIX_LOCK)) {
5305                 gen_op_ld_v(s, ot, s->T0, s->A0);
5306             }
5307         } else {
5308             gen_op_mov_v_reg(s, ot, s->T0, rm);
5309         }
        /* load the immediate bit index */
5311         val = x86_ldub_code(env, s);
5312         tcg_gen_movi_tl(s->T1, val);
5313         if (op < 4)
5314             goto unknown_op;
5315         op -= 4;
5316         goto bt_op;
5317     case 0x1a3: /* bt Gv, Ev */
5318         op = 0;
5319         goto do_btx;
5320     case 0x1ab: /* bts */
5321         op = 1;
5322         goto do_btx;
5323     case 0x1b3: /* btr */
5324         op = 2;
5325         goto do_btx;
5326     case 0x1bb: /* btc */
5327         op = 3;
5328     do_btx:
5329         ot = dflag;
5330         modrm = x86_ldub_code(env, s);
5331         reg = ((modrm >> 3) & 7) | REX_R(s);
5332         mod = (modrm >> 6) & 3;
5333         rm = (modrm & 7) | REX_B(s);
5334         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5335         if (mod != 3) {
5336             AddressParts a = gen_lea_modrm_0(env, s, modrm);
            /* specific case: the bit index may point outside the operand,
               so add the displacement of the word that holds the bit */
5338             gen_exts(ot, s->T1);
5339             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5340             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5341             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5342             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5343             if (!(s->prefix & PREFIX_LOCK)) {
5344                 gen_op_ld_v(s, ot, s->T0, s->A0);
5345             }
5346         } else {
5347             gen_op_mov_v_reg(s, ot, s->T0, rm);
5348         }
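        /*
         * Common tail: reduce the bit index modulo the operand width,
         * then build the single-bit mask used by all four operations.
         */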
5349     bt_op:
5350         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5351         tcg_gen_movi_tl(s->tmp0, 1);
5352         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5353         if (s->prefix & PREFIX_LOCK) {
5354             switch (op) {
5355             case 0: /* bt */
5356                 /* Needs no atomic ops; we suppressed the normal
5357                    memory load for LOCK above so do it now.  */
5358                 gen_op_ld_v(s, ot, s->T0, s->A0);
5359                 break;
5360             case 1: /* bts */
5361                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5362                                            s->mem_index, ot | MO_LE);
5363                 break;
5364             case 2: /* btr */
5365                 tcg_gen_not_tl(s->tmp0, s->tmp0);
5366                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5367                                             s->mem_index, ot | MO_LE);
5368                 break;
5369             default:
5370             case 3: /* btc */
5371                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5372                                             s->mem_index, ot | MO_LE);
5373                 break;
5374             }
5375             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5376         } else {
5377             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5378             switch (op) {
5379             case 0: /* bt */
5380                 /* Data already loaded; nothing to do.  */
5381                 break;
5382             case 1: /* bts */
5383                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5384                 break;
5385             case 2: /* btr */
5386                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5387                 break;
5388             default:
5389             case 3: /* btc */
5390                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5391                 break;
5392             }
5393             if (op != 0) {
5394                 if (mod != 3) {
5395                     gen_op_st_v(s, ot, s->T0, s->A0);
5396                 } else {
5397                     gen_op_mov_reg_v(s, ot, rm, s->T0);
5398                 }
5399             }
5400         }
5401 
5402         /* Delay all CC updates until after the store above.  Note that
5403            C is the result of the test, Z is unchanged, and the others
5404            are all undefined.  */
5405         switch (s->cc_op) {
5406         case CC_OP_MULB ... CC_OP_MULQ:
5407         case CC_OP_ADDB ... CC_OP_ADDQ:
5408         case CC_OP_ADCB ... CC_OP_ADCQ:
5409         case CC_OP_SUBB ... CC_OP_SUBQ:
5410         case CC_OP_SBBB ... CC_OP_SBBQ:
5411         case CC_OP_LOGICB ... CC_OP_LOGICQ:
5412         case CC_OP_INCB ... CC_OP_INCQ:
5413         case CC_OP_DECB ... CC_OP_DECQ:
5414         case CC_OP_SHLB ... CC_OP_SHLQ:
5415         case CC_OP_SARB ... CC_OP_SARQ:
5416         case CC_OP_BMILGB ... CC_OP_BMILGQ:
5417             /* Z was going to be computed from the non-zero status of CC_DST.
5418                We can get that same Z value (and the new C value) by leaving
5419                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5420                same width.  */
5421             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5422             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5423             break;
5424         default:
5425             /* Otherwise, generate EFLAGS and replace the C bit.  */
5426             gen_compute_eflags(s);
5427             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5428                                ctz32(CC_C), 1);
5429             break;
5430         }
5431         break;
5432     case 0x1bc: /* bsf / tzcnt */
5433     case 0x1bd: /* bsr / lzcnt */
5434         ot = dflag;
5435         modrm = x86_ldub_code(env, s);
5436         reg = ((modrm >> 3) & 7) | REX_R(s);
5437         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5438         gen_extu(ot, s->T0);
5439 
5440         /* Note that lzcnt and tzcnt are in different extensions.  */
5441         if ((prefixes & PREFIX_REPZ)
5442             && (b & 1
5443                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5444                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5445             int size = 8 << ot;
            /* For lzcnt/tzcnt, the C bit is defined in terms of the input. */
5447             tcg_gen_mov_tl(cpu_cc_src, s->T0);
5448             if (b & 1) {
5449                 /* For lzcnt, reduce the target_ulong result by the
5450                    number of zeros that we expect to find at the top.  */
5451                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5452                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
5453             } else {
5454                 /* For tzcnt, a zero input must return the operand size.  */
5455                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
5456             }
            /* For lzcnt/tzcnt, the Z bit is defined in terms of the result.  */
5458             gen_op_update1_cc(s);
5459             set_cc_op(s, CC_OP_BMILGB + ot);
5460         } else {
            /* For bsr/bsf, only the Z bit is defined, and it reflects
               the input rather than the result.  */
5463             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
5464             set_cc_op(s, CC_OP_LOGICB + ot);
5465 
5466             /* ??? The manual says that the output is undefined when the
5467                input is zero, but real hardware leaves it unchanged, and
5468                real programs appear to depend on that.  Accomplish this
5469                by passing the output as the value to return upon zero.  */
5470             if (b & 1) {
5471                 /* For bsr, return the bit index of the first 1 bit,
5472                    not the count of leading zeros.  */
5473                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5474                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
5475                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
5476             } else {
5477                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
5478             }
5479         }
5480         gen_op_mov_reg_v(s, ot, reg, s->T0);
5481         break;
5482         /************************/
5483         /* bcd */
5484     case 0x27: /* daa */
5485         if (CODE64(s))
5486             goto illegal_op;
5487         gen_update_cc_op(s);
5488         gen_helper_daa(tcg_env);
5489         set_cc_op(s, CC_OP_EFLAGS);
5490         break;
5491     case 0x2f: /* das */
5492         if (CODE64(s))
5493             goto illegal_op;
5494         gen_update_cc_op(s);
5495         gen_helper_das(tcg_env);
5496         set_cc_op(s, CC_OP_EFLAGS);
5497         break;
5498     case 0x37: /* aaa */
5499         if (CODE64(s))
5500             goto illegal_op;
5501         gen_update_cc_op(s);
5502         gen_helper_aaa(tcg_env);
5503         set_cc_op(s, CC_OP_EFLAGS);
5504         break;
5505     case 0x3f: /* aas */
5506         if (CODE64(s))
5507             goto illegal_op;
5508         gen_update_cc_op(s);
5509         gen_helper_aas(tcg_env);
5510         set_cc_op(s, CC_OP_EFLAGS);
5511         break;
5512     case 0xd4: /* aam */
5513         if (CODE64(s))
5514             goto illegal_op;
5515         val = x86_ldub_code(env, s);
5516         if (val == 0) {
5517             gen_exception(s, EXCP00_DIVZ);
5518         } else {
5519             gen_helper_aam(tcg_env, tcg_constant_i32(val));
5520             set_cc_op(s, CC_OP_LOGICB);
5521         }
5522         break;
5523     case 0xd5: /* aad */
5524         if (CODE64(s))
5525             goto illegal_op;
5526         val = x86_ldub_code(env, s);
5527         gen_helper_aad(tcg_env, tcg_constant_i32(val));
5528         set_cc_op(s, CC_OP_LOGICB);
5529         break;
5530         /************************/
5531         /* misc */
5532     case 0x90: /* nop */
        /* XXX: correct lock test for all insns */
5534         if (prefixes & PREFIX_LOCK) {
5535             goto illegal_op;
5536         }
5537         /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
5538         if (REX_B(s)) {
5539             goto do_xchg_reg_eax;
5540         }
5541         if (prefixes & PREFIX_REPZ) {
5542             gen_update_cc_op(s);
5543             gen_update_eip_cur(s);
5544             gen_helper_pause(tcg_env, cur_insn_len_i32(s));
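            /* The pause helper exits to the main loop, so nothing in
               this TB runs after it.  */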
5545             s->base.is_jmp = DISAS_NORETURN;
5546         }
5547         break;
5548     case 0x9b: /* fwait */
5549         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5550             (HF_MP_MASK | HF_TS_MASK)) {
5551             gen_exception(s, EXCP07_PREX);
5552         } else {
5553             /* needs to be treated as I/O because of ferr_irq */
5554             translator_io_start(&s->base);
5555             gen_helper_fwait(tcg_env);
5556         }
5557         break;
5558     case 0xcc: /* int3 */
5559         gen_interrupt(s, EXCP03_INT3);
5560         break;
5561     case 0xcd: /* int N */
5562         val = x86_ldub_code(env, s);
5563         if (check_vm86_iopl(s)) {
5564             gen_interrupt(s, val);
5565         }
5566         break;
5567     case 0xce: /* into */
5568         if (CODE64(s))
5569             goto illegal_op;
5570         gen_update_cc_op(s);
5571         gen_update_eip_cur(s);
5572         gen_helper_into(tcg_env, cur_insn_len_i32(s));
5573         break;
5574 #ifdef WANT_ICEBP
5575     case 0xf1: /* icebp (undocumented, exits to external debugger) */
5576         gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5577         gen_debug(s);
5578         break;
5579 #endif
5580     case 0xfa: /* cli */
5581         if (check_iopl(s)) {
5582             gen_reset_eflags(s, IF_MASK);
5583         }
5584         break;
5585     case 0xfb: /* sti */
5586         if (check_iopl(s)) {
5587             gen_set_eflags(s, IF_MASK);
            /* interrupts are not recognized until after the insn following sti */
5589             gen_update_eip_next(s);
5590             gen_eob_inhibit_irq(s, true);
5591         }
5592         break;
5593     case 0x62: /* bound */
5594         if (CODE64(s))
5595             goto illegal_op;
5596         ot = dflag;
5597         modrm = x86_ldub_code(env, s);
5598         reg = (modrm >> 3) & 7;
5599         mod = (modrm >> 6) & 3;
5600         if (mod == 3)
5601             goto illegal_op;
5602         gen_op_mov_v_reg(s, ot, s->T0, reg);
5603         gen_lea_modrm(env, s, modrm);
5604         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5605         if (ot == MO_16) {
5606             gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
5607         } else {
5608             gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
5609         }
5610         break;
5611     case 0x1c8 ... 0x1cf: /* bswap reg */
5612         reg = (b & 7) | REX_B(s);
5613 #ifdef TARGET_X86_64
5614         if (dflag == MO_64) {
5615             tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5616             break;
5617         }
5618 #endif
5619         tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
5620         break;
5621     case 0xd6: /* salc */
5622         if (CODE64(s))
5623             goto illegal_op;
5624         gen_compute_eflags_c(s, s->T0);
5625         tcg_gen_neg_tl(s->T0, s->T0);
5626         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
5627         break;
5628     case 0xe0: /* loopnz */
5629     case 0xe1: /* loopz */
5630     case 0xe2: /* loop */
5631     case 0xe3: /* jecxz */
5632         {
5633             TCGLabel *l1, *l2;
5634             int diff = (int8_t)insn_get(env, s, MO_8);
5635 
            l1 = gen_new_label();   /* taken: branch to the target */
            l2 = gen_new_label();   /* not taken: fall through */
5638             gen_update_cc_op(s);
5639             b &= 3;
5640             switch(b) {
5641             case 0: /* loopnz */
5642             case 1: /* loopz */
5643                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5644                 gen_op_jz_ecx(s, l2);
5645                 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
5646                 break;
5647             case 2: /* loop */
5648                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5649                 gen_op_jnz_ecx(s, l1);
5650                 break;
5651             default:
5652             case 3: /* jcxz */
5653                 gen_op_jz_ecx(s, l1);
5654                 break;
5655             }
5656 
5657             gen_set_label(l2);
5658             gen_jmp_rel_csize(s, 0, 1);
5659 
5660             gen_set_label(l1);
5661             gen_jmp_rel(s, dflag, diff, 0);
5662         }
5663         break;
5664     case 0x130: /* wrmsr */
5665     case 0x132: /* rdmsr */
5666         if (check_cpl0(s)) {
5667             gen_update_cc_op(s);
5668             gen_update_eip_cur(s);
5669             if (b & 2) {
5670                 gen_helper_rdmsr(tcg_env);
5671             } else {
5672                 gen_helper_wrmsr(tcg_env);
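                /* MSR writes may change state that affects translation
                   (e.g. EFER), so end the TB.  */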
5673                 s->base.is_jmp = DISAS_EOB_NEXT;
5674             }
5675         }
5676         break;
5677     case 0x131: /* rdtsc */
5678         gen_update_cc_op(s);
5679         gen_update_eip_cur(s);
5680         translator_io_start(&s->base);
5681         gen_helper_rdtsc(tcg_env);
5682         break;
5683     case 0x133: /* rdpmc */
5684         gen_update_cc_op(s);
5685         gen_update_eip_cur(s);
5686         gen_helper_rdpmc(tcg_env);
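        /* The rdpmc helper currently always raises an exception, so
           execution never falls through.  */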
5687         s->base.is_jmp = DISAS_NORETURN;
5688         break;
5689     case 0x134: /* sysenter */
        /* For AMD, SYSENTER is not valid in long mode */
5691         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5692             goto illegal_op;
5693         }
5694         if (!PE(s)) {
5695             gen_exception_gpf(s);
5696         } else {
5697             gen_helper_sysenter(tcg_env);
5698             s->base.is_jmp = DISAS_EOB_ONLY;
5699         }
5700         break;
5701     case 0x135: /* sysexit */
        /* For AMD, SYSEXIT is not valid in long mode */
5703         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5704             goto illegal_op;
5705         }
5706         if (!PE(s) || CPL(s) != 0) {
5707             gen_exception_gpf(s);
5708         } else {
5709             gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
5710             s->base.is_jmp = DISAS_EOB_ONLY;
5711         }
5712         break;
5713     case 0x105: /* syscall */
        /* For Intel, SYSCALL is only valid in long mode */
5715         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5716             goto illegal_op;
5717         }
5718         gen_update_cc_op(s);
5719         gen_update_eip_cur(s);
5720         gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
        /* TF handling for the syscall insn is different.  The TF bit is
           checked after the syscall insn completes.  This allows #DB not
           to be generated after one has entered CPL0 if TF is set in
           FMASK.  */
5724         gen_eob_worker(s, false, true);
5725         break;
5726     case 0x107: /* sysret */
        /* For Intel, SYSRET is only valid in long mode */
5728         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5729             goto illegal_op;
5730         }
5731         if (!PE(s) || CPL(s) != 0) {
5732             gen_exception_gpf(s);
5733         } else {
5734             gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
5735             /* condition codes are modified only in long mode */
5736             if (LMA(s)) {
5737                 set_cc_op(s, CC_OP_EFLAGS);
5738             }
            /* TF handling for the sysret insn is different.  The TF bit
               is checked after the sysret insn completes.  This allows
               #DB to be generated "as if" the syscall insn in userspace
               had just completed.  */
5743             gen_eob_worker(s, false, true);
5744         }
5745         break;
5746     case 0x1a2: /* cpuid */
5747         gen_update_cc_op(s);
5748         gen_update_eip_cur(s);
5749         gen_helper_cpuid(tcg_env);
5750         break;
5751     case 0xf4: /* hlt */
5752         if (check_cpl0(s)) {
5753             gen_update_cc_op(s);
5754             gen_update_eip_cur(s);
5755             gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
5756             s->base.is_jmp = DISAS_NORETURN;
5757         }
5758         break;
5759     case 0x100:
5760         modrm = x86_ldub_code(env, s);
5761         mod = (modrm >> 6) & 3;
5762         op = (modrm >> 3) & 7;
5763         switch(op) {
5764         case 0: /* sldt */
5765             if (!PE(s) || VM86(s))
5766                 goto illegal_op;
5767             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5768                 break;
5769             }
5770             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
5771             tcg_gen_ld32u_tl(s->T0, tcg_env,
5772                              offsetof(CPUX86State, ldt.selector));
5773             ot = mod == 3 ? dflag : MO_16;
5774             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5775             break;
5776         case 2: /* lldt */
5777             if (!PE(s) || VM86(s))
5778                 goto illegal_op;
5779             if (check_cpl0(s)) {
5780                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
5781                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5782                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5783                 gen_helper_lldt(tcg_env, s->tmp2_i32);
5784             }
5785             break;
5786         case 1: /* str */
5787             if (!PE(s) || VM86(s))
5788                 goto illegal_op;
5789             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5790                 break;
5791             }
5792             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
5793             tcg_gen_ld32u_tl(s->T0, tcg_env,
5794                              offsetof(CPUX86State, tr.selector));
5795             ot = mod == 3 ? dflag : MO_16;
5796             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5797             break;
5798         case 3: /* ltr */
5799             if (!PE(s) || VM86(s))
5800                 goto illegal_op;
5801             if (check_cpl0(s)) {
5802                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
5803                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5804                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5805                 gen_helper_ltr(tcg_env, s->tmp2_i32);
5806             }
5807             break;
5808         case 4: /* verr */
5809         case 5: /* verw */
5810             if (!PE(s) || VM86(s))
5811                 goto illegal_op;
5812             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5813             gen_update_cc_op(s);
5814             if (op == 4) {
5815                 gen_helper_verr(tcg_env, s->T0);
5816             } else {
5817                 gen_helper_verw(tcg_env, s->T0);
5818             }
5819             set_cc_op(s, CC_OP_EFLAGS);
5820             break;
5821         default:
5822             goto unknown_op;
5823         }
5824         break;
5825 
5826     case 0x101:
5827         modrm = x86_ldub_code(env, s);
5828         switch (modrm) {
5829         CASE_MODRM_MEM_OP(0): /* sgdt */
5830             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5831                 break;
5832             }
5833             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5834             gen_lea_modrm(env, s, modrm);
5835             tcg_gen_ld32u_tl(s->T0,
5836                              tcg_env, offsetof(CPUX86State, gdt.limit));
5837             gen_op_st_v(s, MO_16, s->T0, s->A0);
5838             gen_add_A0_im(s, 2);
5839             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
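            /* With a 16-bit operand size only a 24-bit base is stored.  */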
5840             if (dflag == MO_16) {
5841                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5842             }
5843             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5844             break;
5845 
5846         case 0xc8: /* monitor */
5847             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5848                 goto illegal_op;
5849             }
5850             gen_update_cc_op(s);
5851             gen_update_eip_cur(s);
5852             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5853             gen_extu(s->aflag, s->A0);
5854             gen_add_A0_ds_seg(s);
5855             gen_helper_monitor(tcg_env, s->A0);
5856             break;
5857 
5858         case 0xc9: /* mwait */
5859             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5860                 goto illegal_op;
5861             }
5862             gen_update_cc_op(s);
5863             gen_update_eip_cur(s);
5864             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
5865             s->base.is_jmp = DISAS_NORETURN;
5866             break;
5867 
5868         case 0xca: /* clac */
5869             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5870                 || CPL(s) != 0) {
5871                 goto illegal_op;
5872             }
5873             gen_reset_eflags(s, AC_MASK);
5874             s->base.is_jmp = DISAS_EOB_NEXT;
5875             break;
5876 
5877         case 0xcb: /* stac */
5878             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5879                 || CPL(s) != 0) {
5880                 goto illegal_op;
5881             }
5882             gen_set_eflags(s, AC_MASK);
5883             s->base.is_jmp = DISAS_EOB_NEXT;
5884             break;
5885 
5886         CASE_MODRM_MEM_OP(1): /* sidt */
5887             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5888                 break;
5889             }
5890             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5891             gen_lea_modrm(env, s, modrm);
5892             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
5893             gen_op_st_v(s, MO_16, s->T0, s->A0);
5894             gen_add_A0_im(s, 2);
5895             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
5896             if (dflag == MO_16) {
5897                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5898             }
5899             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5900             break;
5901 
5902         case 0xd0: /* xgetbv */
5903             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5904                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5905                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5906                 goto illegal_op;
5907             }
5908             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5909             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
5910             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5911             break;
5912 
5913         case 0xd1: /* xsetbv */
5914             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5915                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5916                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5917                 goto illegal_op;
5918             }
5919             if (!check_cpl0(s)) {
5920                 break;
5921             }
5922             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5923                                   cpu_regs[R_EDX]);
5924             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5925             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
5926             /* End TB because translation flags may change.  */
5927             s->base.is_jmp = DISAS_EOB_NEXT;
5928             break;
5929 
5930         case 0xd8: /* VMRUN */
5931             if (!SVME(s) || !PE(s)) {
5932                 goto illegal_op;
5933             }
5934             if (!check_cpl0(s)) {
5935                 break;
5936             }
5937             gen_update_cc_op(s);
5938             gen_update_eip_cur(s);
5939             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
5940                              cur_insn_len_i32(s));
5941             tcg_gen_exit_tb(NULL, 0);
5942             s->base.is_jmp = DISAS_NORETURN;
5943             break;
5944 
5945         case 0xd9: /* VMMCALL */
5946             if (!SVME(s)) {
5947                 goto illegal_op;
5948             }
5949             gen_update_cc_op(s);
5950             gen_update_eip_cur(s);
5951             gen_helper_vmmcall(tcg_env);
5952             break;
5953 
5954         case 0xda: /* VMLOAD */
5955             if (!SVME(s) || !PE(s)) {
5956                 goto illegal_op;
5957             }
5958             if (!check_cpl0(s)) {
5959                 break;
5960             }
5961             gen_update_cc_op(s);
5962             gen_update_eip_cur(s);
5963             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
5964             break;
5965 
5966         case 0xdb: /* VMSAVE */
5967             if (!SVME(s) || !PE(s)) {
5968                 goto illegal_op;
5969             }
5970             if (!check_cpl0(s)) {
5971                 break;
5972             }
5973             gen_update_cc_op(s);
5974             gen_update_eip_cur(s);
5975             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
5976             break;
5977 
5978         case 0xdc: /* STGI */
5979             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5980                 || !PE(s)) {
5981                 goto illegal_op;
5982             }
5983             if (!check_cpl0(s)) {
5984                 break;
5985             }
5986             gen_update_cc_op(s);
5987             gen_helper_stgi(tcg_env);
5988             s->base.is_jmp = DISAS_EOB_NEXT;
5989             break;
5990 
5991         case 0xdd: /* CLGI */
5992             if (!SVME(s) || !PE(s)) {
5993                 goto illegal_op;
5994             }
5995             if (!check_cpl0(s)) {
5996                 break;
5997             }
5998             gen_update_cc_op(s);
5999             gen_update_eip_cur(s);
6000             gen_helper_clgi(tcg_env);
6001             break;
6002 
6003         case 0xde: /* SKINIT */
6004             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
6005                 || !PE(s)) {
6006                 goto illegal_op;
6007             }
6008             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
6009             /* If not intercepted, not implemented -- raise #UD. */
6010             goto illegal_op;
6011 
6012         case 0xdf: /* INVLPGA */
6013             if (!SVME(s) || !PE(s)) {
6014                 goto illegal_op;
6015             }
6016             if (!check_cpl0(s)) {
6017                 break;
6018             }
6019             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6020             if (s->aflag == MO_64) {
6021                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6022             } else {
6023                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6024             }
6025             gen_helper_flush_page(tcg_env, s->A0);
6026             s->base.is_jmp = DISAS_EOB_NEXT;
6027             break;
6028 
6029         CASE_MODRM_MEM_OP(2): /* lgdt */
6030             if (!check_cpl0(s)) {
6031                 break;
6032             }
6033             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
6034             gen_lea_modrm(env, s, modrm);
6035             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6036             gen_add_A0_im(s, 2);
6037             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6038             if (dflag == MO_16) {
6039                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6040             }
6041             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
6042             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
6043             break;
6044 
6045         CASE_MODRM_MEM_OP(3): /* lidt */
6046             if (!check_cpl0(s)) {
6047                 break;
6048             }
6049             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
6050             gen_lea_modrm(env, s, modrm);
6051             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6052             gen_add_A0_im(s, 2);
6053             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6054             if (dflag == MO_16) {
6055                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6056             }
6057             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
6058             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
6059             break;
6060 
6061         CASE_MODRM_OP(4): /* smsw */
6062             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6063                 break;
6064             }
6065             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
6066             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
6067             /*
6068              * In 32-bit mode, the higher 16 bits of the destination
6069              * register are undefined.  In practice CR0[31:0] is stored
6070              * just like in 64-bit mode.
6071              */
6072             mod = (modrm >> 6) & 3;
6073             ot = (mod != 3 ? MO_16 : s->dflag);
6074             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6075             break;
6076         case 0xee: /* rdpkru */
6077             if (prefixes & PREFIX_LOCK) {
6078                 goto illegal_op;
6079             }
6080             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6081             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
6082             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
6083             break;
6084         case 0xef: /* wrpkru */
6085             if (prefixes & PREFIX_LOCK) {
6086                 goto illegal_op;
6087             }
6088             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6089                                   cpu_regs[R_EDX]);
6090             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6091             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
6092             break;
6093 
6094         CASE_MODRM_OP(6): /* lmsw */
6095             if (!check_cpl0(s)) {
6096                 break;
6097             }
6098             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6099             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6100             /*
6101              * Only the 4 lower bits of CR0 are modified.
6102              * PE cannot be set to zero if already set to one.
6103              */
6104             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
6105             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6106             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6107             tcg_gen_or_tl(s->T0, s->T0, s->T1);
6108             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
6109             s->base.is_jmp = DISAS_EOB_NEXT;
6110             break;
6111 
6112         CASE_MODRM_MEM_OP(7): /* invlpg */
6113             if (!check_cpl0(s)) {
6114                 break;
6115             }
6116             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
6117             gen_lea_modrm(env, s, modrm);
6118             gen_helper_flush_page(tcg_env, s->A0);
6119             s->base.is_jmp = DISAS_EOB_NEXT;
6120             break;
6121 
6122         case 0xf8: /* swapgs */
6123 #ifdef TARGET_X86_64
6124             if (CODE64(s)) {
6125                 if (check_cpl0(s)) {
6126                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
6127                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
6128                                   offsetof(CPUX86State, kernelgsbase));
6129                     tcg_gen_st_tl(s->T0, tcg_env,
6130                                   offsetof(CPUX86State, kernelgsbase));
6131                 }
6132                 break;
6133             }
6134 #endif
6135             goto illegal_op;
6136 
6137         case 0xf9: /* rdtscp */
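                 /*
                  * RDTSCP behaves as RDTSC and additionally loads
                  * IA32_TSC_AUX into ECX, reusing the rdpid helper.
                  */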
6138             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6139                 goto illegal_op;
6140             }
6141             gen_update_cc_op(s);
6142             gen_update_eip_cur(s);
6143             translator_io_start(&s->base);
6144             gen_helper_rdtsc(tcg_env);
6145             gen_helper_rdpid(s->T0, tcg_env);
6146             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
6147             break;
6148 
6149         default:
6150             goto unknown_op;
6151         }
6152         break;
6153 
6154     case 0x108: /* invd */
6155     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6156         if (check_cpl0(s)) {
6157             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
6158             /* nothing to do */
6159         }
6160         break;
6161     case 0x63: /* arpl or movsxd (x86_64) */
6162 #ifdef TARGET_X86_64
6163         if (CODE64(s)) {
6164             int d_ot;
6165             /* d_ot is the size of the destination operand */
6166             d_ot = dflag;
6167 
6168             modrm = x86_ldub_code(env, s);
6169             reg = ((modrm >> 3) & 7) | REX_R(s);
6170             mod = (modrm >> 6) & 3;
6171             rm = (modrm & 7) | REX_B(s);
6172 
6173             if (mod == 3) {
6174                 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
6175                 /* sign extend */
6176                 if (d_ot == MO_64) {
6177                     tcg_gen_ext32s_tl(s->T0, s->T0);
6178                 }
6179                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6180             } else {
6181                 gen_lea_modrm(env, s, modrm);
6182                 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
6183                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6184             }
6185         } else
6186 #endif
6187         {
6188             TCGLabel *label1;
6189             TCGv t0, t1, t2;
6190 
6191             if (!PE(s) || VM86(s)) {
6192                 goto illegal_op;
                 }
6193             t0 = tcg_temp_new();
6194             t1 = tcg_temp_new();
6195             t2 = tcg_temp_new();
6196             ot = MO_16;
6197             modrm = x86_ldub_code(env, s);
6198             reg = (modrm >> 3) & 7;
6199             mod = (modrm >> 6) & 3;
6200             rm = modrm & 7;
6201             if (mod != 3) {
6202                 gen_lea_modrm(env, s, modrm);
6203                 gen_op_ld_v(s, ot, t0, s->A0);
6204             } else {
6205                 gen_op_mov_v_reg(s, ot, t0, rm);
6206             }
6207             gen_op_mov_v_reg(s, ot, t1, reg);
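                 /*
                  * ARPL: if the RPL field (bits 1:0) of the destination
                  * selector is lower than that of the source, raise it to
                  * match and set ZF, otherwise clear ZF.  t2 carries the
                  * new ZF value into cc_src below.
                  */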
6208             tcg_gen_andi_tl(s->tmp0, t0, 3);
6209             tcg_gen_andi_tl(t1, t1, 3);
6210             tcg_gen_movi_tl(t2, 0);
6211             label1 = gen_new_label();
6212             tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
6213             tcg_gen_andi_tl(t0, t0, ~3);
6214             tcg_gen_or_tl(t0, t0, t1);
6215             tcg_gen_movi_tl(t2, CC_Z);
6216             gen_set_label(label1);
6217             if (mod != 3) {
6218                 gen_op_st_v(s, ot, t0, s->A0);
6219             } else {
6220                 gen_op_mov_reg_v(s, ot, rm, t0);
6221             }
6222             gen_compute_eflags(s);
6223             tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6224             tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6225         }
6226         break;
6227     case 0x102: /* lar */
6228     case 0x103: /* lsl */
6229         {
6230             TCGLabel *label1;
6231             TCGv t0;
6232             if (!PE(s) || VM86(s)) {
6233                 goto illegal_op;
                 }
6234             ot = dflag != MO_16 ? MO_32 : MO_16;
6235             modrm = x86_ldub_code(env, s);
6236             reg = ((modrm >> 3) & 7) | REX_R(s);
6237             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6238             t0 = tcg_temp_new();
6239             gen_update_cc_op(s);
6240             if (b == 0x102) {
6241                 gen_helper_lar(t0, tcg_env, s->T0);
6242             } else {
6243                 gen_helper_lsl(t0, tcg_env, s->T0);
6244             }
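                 /*
                  * The lar/lsl helpers set ZF in cc_src on success; the
                  * destination register is written back only in that case.
                  */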
6245             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
6246             label1 = gen_new_label();
6247             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
6248             gen_op_mov_reg_v(s, ot, reg, t0);
6249             gen_set_label(label1);
6250             set_cc_op(s, CC_OP_EFLAGS);
6251         }
6252         break;
6253     case 0x118:
6254         modrm = x86_ldub_code(env, s);
6255         mod = (modrm >> 6) & 3;
6256         op = (modrm >> 3) & 7;
6257         switch(op) {
6258         case 0: /* prefetchnta */
6259         case 1: /* prefetcht0 */
6260         case 2: /* prefetcht1 */
6261         case 3: /* prefetcht2 */
6262             if (mod == 3) {
6263                 goto illegal_op;
             }
6264             gen_nop_modrm(env, s, modrm);
6265             /* nothing more to do */
6266             break;
6267         default: /* nop (multi byte) */
6268             gen_nop_modrm(env, s, modrm);
6269             break;
6270         }
6271         break;
6272     case 0x11a:
6273         modrm = x86_ldub_code(env, s);
6274         if (s->flags & HF_MPX_EN_MASK) {
6275             mod = (modrm >> 6) & 3;
6276             reg = ((modrm >> 3) & 7) | REX_R(s);
6277             if (prefixes & PREFIX_REPZ) {
6278                 /* bndcl */
6279                 if (reg >= 4
6280                     || (prefixes & PREFIX_LOCK)
6281                     || s->aflag == MO_16) {
6282                     goto illegal_op;
6283                 }
6284                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6285             } else if (prefixes & PREFIX_REPNZ) {
6286                 /* bndcu */
6287                 if (reg >= 4
6288                     || (prefixes & PREFIX_LOCK)
6289                     || s->aflag == MO_16) {
6290                     goto illegal_op;
6291                 }
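                     /*
                      * The upper bound is stored in one's complement form,
                      * as on real hardware, so that an all-zeroes register
                      * means "no restriction"; undo it before the check.
                      */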
6292                 TCGv_i64 notu = tcg_temp_new_i64();
6293                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6294                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
6295             } else if (prefixes & PREFIX_DATA) {
6296                 /* bndmov -- from reg/mem */
6297                 if (reg >= 4 || s->aflag == MO_16) {
6298                     goto illegal_op;
6299                 }
6300                 if (mod == 3) {
6301                     int reg2 = (modrm & 7) | REX_B(s);
6302                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6303                         goto illegal_op;
6304                     }
6305                     if (s->flags & HF_MPX_IU_MASK) {
6306                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6307                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6308                     }
6309                 } else {
6310                     gen_lea_modrm(env, s, modrm);
6311                     if (CODE64(s)) {
6312                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6313                                             s->mem_index, MO_LEUQ);
6314                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6315                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6316                                             s->mem_index, MO_LEUQ);
6317                     } else {
6318                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6319                                             s->mem_index, MO_LEUL);
6320                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6321                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6322                                             s->mem_index, MO_LEUL);
6323                     }
6324                     /* bnd registers are now in use */
6325                     gen_set_hflag(s, HF_MPX_IU_MASK);
6326                 }
6327             } else if (mod != 3) {
6328                 /* bndldx */
6329                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6330                 if (reg >= 4
6331                     || (prefixes & PREFIX_LOCK)
6332                     || s->aflag == MO_16
6333                     || a.base < -1) {
6334                     goto illegal_op;
6335                 }
6336                 if (a.base >= 0) {
6337                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6338                 } else {
6339                     tcg_gen_movi_tl(s->A0, 0);
6340                 }
6341                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6342                 if (a.index >= 0) {
6343                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6344                 } else {
6345                     tcg_gen_movi_tl(s->T0, 0);
6346                 }
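                     /*
                      * The 64-bit helper returns the lower bound and leaves
                      * the upper bound in mmx_t0; the 32-bit helper packs
                      * both 32-bit bounds into its 64-bit return value.
                      */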
6347                 if (CODE64(s)) {
6348                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
6349                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
6350                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6351                 } else {
6352                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
6353                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6354                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6355                 }
6356                 gen_set_hflag(s, HF_MPX_IU_MASK);
6357             }
6358         }
6359         gen_nop_modrm(env, s, modrm);
6360         break;
6361     case 0x11b:
6362         modrm = x86_ldub_code(env, s);
6363         if (s->flags & HF_MPX_EN_MASK) {
6364             mod = (modrm >> 6) & 3;
6365             reg = ((modrm >> 3) & 7) | REX_R(s);
6366             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6367                 /* bndmk */
6368                 if (reg >= 4
6369                     || (prefixes & PREFIX_LOCK)
6370                     || s->aflag == MO_16) {
6371                     goto illegal_op;
6372                 }
6373                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6374                 if (a.base >= 0) {
6375                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6376                     if (!CODE64(s)) {
6377                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6378                     }
6379                 } else if (a.base == -1) {
6380                     /* no base register: the lower bound is 0 */
6381                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
6382                 } else {
6383                     /* rip-relative generates #ud */
6384                     goto illegal_op;
6385                 }
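                     /* Store the complemented upper bound, as BNDCU expects. */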
6386                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
6387                 if (!CODE64(s)) {
6388                     tcg_gen_ext32u_tl(s->A0, s->A0);
6389                 }
6390                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
6391                 /* bnd registers are now in use */
6392                 gen_set_hflag(s, HF_MPX_IU_MASK);
6393                 break;
6394             } else if (prefixes & PREFIX_REPNZ) {
6395                 /* bndcn */
6396                 if (reg >= 4
6397                     || (prefixes & PREFIX_LOCK)
6398                     || s->aflag == MO_16) {
6399                     goto illegal_op;
6400                 }
6401                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
6402             } else if (prefixes & PREFIX_DATA) {
6403                 /* bndmov -- to reg/mem */
6404                 if (reg >= 4 || s->aflag == MO_16) {
6405                     goto illegal_op;
6406                 }
6407                 if (mod == 3) {
6408                     int reg2 = (modrm & 7) | REX_B(s);
6409                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6410                         goto illegal_op;
6411                     }
6412                     if (s->flags & HF_MPX_IU_MASK) {
6413                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6414                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6415                     }
6416                 } else {
6417                     gen_lea_modrm(env, s, modrm);
6418                     if (CODE64(s)) {
6419                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6420                                             s->mem_index, MO_LEUQ);
6421                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6422                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6423                                             s->mem_index, MO_LEUQ);
6424                     } else {
6425                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6426                                             s->mem_index, MO_LEUL);
6427                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6428                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6429                                             s->mem_index, MO_LEUL);
6430                     }
6431                 }
6432             } else if (mod != 3) {
6433                 /* bndstx */
6434                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6435                 if (reg >= 4
6436                     || (prefixes & PREFIX_LOCK)
6437                     || s->aflag == MO_16
6438                     || a.base < -1) {
6439                     goto illegal_op;
6440                 }
6441                 if (a.base >= 0) {
6442                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6443                 } else {
6444                     tcg_gen_movi_tl(s->A0, 0);
6445                 }
6446                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6447                 if (a.index >= 0) {
6448                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6449                 } else {
6450                     tcg_gen_movi_tl(s->T0, 0);
6451                 }
6452                 if (CODE64(s)) {
6453                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
6454                                         cpu_bndl[reg], cpu_bndu[reg]);
6455                 } else {
6456                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
6457                                         cpu_bndl[reg], cpu_bndu[reg]);
6458                 }
6459             }
6460         }
6461         gen_nop_modrm(env, s, modrm);
6462         break;
6463     case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6464         modrm = x86_ldub_code(env, s);
6465         gen_nop_modrm(env, s, modrm);
6466         break;
6467 
6468     case 0x120: /* mov reg, crN */
6469     case 0x122: /* mov crN, reg */
6470         if (!check_cpl0(s)) {
6471             break;
6472         }
6473         modrm = x86_ldub_code(env, s);
6474         /*
6475          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6476          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6477          * processors all show that the mod bits are assumed to be 1's,
6478          * regardless of actual values.
6479          */
6480         rm = (modrm & 7) | REX_B(s);
6481         reg = ((modrm >> 3) & 7) | REX_R(s);
6482         switch (reg) {
6483         case 0:
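                 /*
                  * With a LOCK prefix, AMD's CR8 legacy alias (the CR8LEG
                  * CPUID feature) redirects the CR0 encoding to CR8, the
                  * task priority register.
                  */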
6484             if ((prefixes & PREFIX_LOCK) &&
6485                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6486                 reg = 8;
6487             }
6488             break;
6489         case 2:
6490         case 3:
6491         case 4:
6492         case 8:
6493             break;
6494         default:
6495             goto unknown_op;
6496         }
6497         ot  = (CODE64(s) ? MO_64 : MO_32);
6498 
6499         translator_io_start(&s->base);
6500         if (b & 2) {
6501             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6502             gen_op_mov_v_reg(s, ot, s->T0, rm);
6503             gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
6504             s->base.is_jmp = DISAS_EOB_NEXT;
6505         } else {
6506             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
6507             gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
6508             gen_op_mov_reg_v(s, ot, rm, s->T0);
6509         }
6510         break;
6511 
6512     case 0x121: /* mov reg, drN */
6513     case 0x123: /* mov drN, reg */
6514         if (check_cpl0(s)) {
6515             modrm = x86_ldub_code(env, s);
6516             /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6517              * AMD documentation (24594.pdf) and testing of
6518              * Intel 386 and 486 processors all show that the mod bits
6519              * are assumed to be 1's, regardless of actual values.
6520              */
6521             rm = (modrm & 7) | REX_B(s);
6522             reg = ((modrm >> 3) & 7) | REX_R(s);
6523             ot = CODE64(s) ? MO_64 : MO_32;
6527             if (reg >= 8) {
6528                 goto illegal_op;
6529             }
6530             if (b & 2) {
6531                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
6532                 gen_op_mov_v_reg(s, ot, s->T0, rm);
6533                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6534                 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
6535                 s->base.is_jmp = DISAS_EOB_NEXT;
6536             } else {
6537                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6538                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6539                 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
6540                 gen_op_mov_reg_v(s, ot, rm, s->T0);
6541             }
6542         }
6543         break;
6544     case 0x106: /* clts */
6545         if (check_cpl0(s)) {
6546             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6547             gen_helper_clts(tcg_env);
6548             /* abort block because static cpu state changed */
6549             s->base.is_jmp = DISAS_EOB_NEXT;
6550         }
6551         break;
6552     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6553     case 0x1c3: /* MOVNTI reg, mem */
6554         if (!(s->cpuid_features & CPUID_SSE2)) {
6555             goto illegal_op;
         }
6556         ot = mo_64_32(dflag);
6557         modrm = x86_ldub_code(env, s);
6558         mod = (modrm >> 6) & 3;
6559         if (mod == 3) {
6560             goto illegal_op;
         }
6561         reg = ((modrm >> 3) & 7) | REX_R(s);
6562         /* generate a generic store */
6563         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
6564         break;
6565     case 0x1ae:
6566         modrm = x86_ldub_code(env, s);
6567         switch (modrm) {
6568         CASE_MODRM_MEM_OP(0): /* fxsave */
6569             if (!(s->cpuid_features & CPUID_FXSR)
6570                 || (prefixes & PREFIX_LOCK)) {
6571                 goto illegal_op;
6572             }
6573             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6574                 gen_exception(s, EXCP07_PREX);
6575                 break;
6576             }
6577             gen_lea_modrm(env, s, modrm);
6578             gen_helper_fxsave(tcg_env, s->A0);
6579             break;
6580 
6581         CASE_MODRM_MEM_OP(1): /* fxrstor */
6582             if (!(s->cpuid_features & CPUID_FXSR)
6583                 || (prefixes & PREFIX_LOCK)) {
6584                 goto illegal_op;
6585             }
6586             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6587                 gen_exception(s, EXCP07_PREX);
6588                 break;
6589             }
6590             gen_lea_modrm(env, s, modrm);
6591             gen_helper_fxrstor(tcg_env, s->A0);
6592             break;
6593 
6594         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6595             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6596                 goto illegal_op;
6597             }
6598             if (s->flags & HF_TS_MASK) {
6599                 gen_exception(s, EXCP07_PREX);
6600                 break;
6601             }
6602             gen_lea_modrm(env, s, modrm);
6603             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
6604             gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
6605             break;
6606 
6607         CASE_MODRM_MEM_OP(3): /* stmxcsr */
6608             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6609                 goto illegal_op;
6610             }
6611             if (s->flags & HF_TS_MASK) {
6612                 gen_exception(s, EXCP07_PREX);
6613                 break;
6614             }
6615             gen_helper_update_mxcsr(tcg_env);
6616             gen_lea_modrm(env, s, modrm);
6617             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
6618             gen_op_st_v(s, MO_32, s->T0, s->A0);
6619             break;
6620 
6621         CASE_MODRM_MEM_OP(4): /* xsave */
6622             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6623                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6624                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6625                 goto illegal_op;
6626             }
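                 /*
                  * EDX:EAX form the 64-bit instruction mask selecting
                  * which XSAVE state components to save.
                  */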
6627             gen_lea_modrm(env, s, modrm);
6628             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6629                                   cpu_regs[R_EDX]);
6630             gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
6631             break;
6632 
6633         CASE_MODRM_MEM_OP(5): /* xrstor */
6634             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6635                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6636                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6637                 goto illegal_op;
6638             }
6639             gen_lea_modrm(env, s, modrm);
6640             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6641                                   cpu_regs[R_EDX]);
6642             gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
6643             /* XRSTOR is how MPX is enabled, which changes how
6644                we translate.  Thus we need to end the TB.  */
6645             s->base.is_jmp = DISAS_EOB_NEXT;
6646             break;
6647 
6648         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6649             if (prefixes & PREFIX_LOCK) {
6650                 goto illegal_op;
6651             }
6652             if (prefixes & PREFIX_DATA) {
6653                 /* clwb */
6654                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
6655                     goto illegal_op;
6656                 }
6657                 gen_nop_modrm(env, s, modrm);
6658             } else {
6659                 /* xsaveopt */
6660                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6661                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6662                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6663                     goto illegal_op;
6664                 }
6665                 gen_lea_modrm(env, s, modrm);
6666                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6667                                       cpu_regs[R_EDX]);
6668                 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
6669             }
6670             break;
6671 
6672         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6673             if (prefixes & PREFIX_LOCK) {
6674                 goto illegal_op;
6675             }
6676             if (prefixes & PREFIX_DATA) {
6677                 /* clflushopt */
6678                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6679                     goto illegal_op;
6680                 }
6681             } else {
6682                 /* clflush */
6683                 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6684                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
6685                     goto illegal_op;
6686                 }
6687             }
6688             gen_nop_modrm(env, s, modrm);
6689             break;
6690 
6691         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6692         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6693         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6694         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6695             if (CODE64(s)
6696                 && (prefixes & PREFIX_REPZ)
6697                 && !(prefixes & PREFIX_LOCK)
6698                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6699                 TCGv base, treg, src, dst;
6700 
6701                 /* Preserve hflags bits by testing CR4 at runtime.  */
6702                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
6703                 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
6704 
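                     /*
                      * Bit 3 of the modrm byte selects FS vs GS, and
                      * bit 4 selects read vs write of the base.
                      */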
6705                 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6706                 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6707 
6708                 if (modrm & 0x10) {
6709                     /* wr*base */
6710                     dst = base, src = treg;
6711                 } else {
6712                     /* rd*base */
6713                     dst = treg, src = base;
6714                 }
6715 
6716                 if (s->dflag == MO_32) {
6717                     tcg_gen_ext32u_tl(dst, src);
6718                 } else {
6719                     tcg_gen_mov_tl(dst, src);
6720                 }
6721                 break;
6722             }
6723             goto unknown_op;
6724 
6725         case 0xf8: /* sfence / pcommit */
6726             if (prefixes & PREFIX_DATA) {
6727                 /* pcommit */
6728                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6729                     || (prefixes & PREFIX_LOCK)) {
6730                     goto illegal_op;
6731                 }
6732                 break;
6733             }
6734             /* fallthru */
6735         case 0xf9 ... 0xff: /* sfence */
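                 /*
                  * The x86 fences map onto TCG barriers: sfence orders
                  * stores against stores, lfence loads against loads,
                  * and mfence all accesses against each other.
                  */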
6736             if (!(s->cpuid_features & CPUID_SSE)
6737                 || (prefixes & PREFIX_LOCK)) {
6738                 goto illegal_op;
6739             }
6740             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
6741             break;
6742         case 0xe8 ... 0xef: /* lfence */
6743             if (!(s->cpuid_features & CPUID_SSE)
6744                 || (prefixes & PREFIX_LOCK)) {
6745                 goto illegal_op;
6746             }
6747             tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6748             break;
6749         case 0xf0 ... 0xf7: /* mfence */
6750             if (!(s->cpuid_features & CPUID_SSE2)
6751                 || (prefixes & PREFIX_LOCK)) {
6752                 goto illegal_op;
6753             }
6754             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
6755             break;
6756 
6757         default:
6758             goto unknown_op;
6759         }
6760         break;
6761 
6762     case 0x10d: /* 3DNow! prefetch(w) */
6763         modrm = x86_ldub_code(env, s);
6764         mod = (modrm >> 6) & 3;
6765         if (mod == 3) {
6766             goto illegal_op;
         }
6767         gen_nop_modrm(env, s, modrm);
6768         break;
6769     case 0x1aa: /* rsm */
6770         gen_svm_check_intercept(s, SVM_EXIT_RSM);
6771         if (!(s->flags & HF_SMM_MASK)) {
6772             goto illegal_op;
         }
6773 #ifdef CONFIG_USER_ONLY
6774         /* user-only emulation should never be in SMM */
6775         g_assert_not_reached();
6776 #else
6777         gen_update_cc_op(s);
6778         gen_update_eip_next(s);
6779         gen_helper_rsm(tcg_env);
6780 #endif /* CONFIG_USER_ONLY */
6781         s->base.is_jmp = DISAS_EOB_ONLY;
6782         break;
6783     case 0x1b8: /* SSE4.2 popcnt */
6784         if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
6785              PREFIX_REPZ) {
6786             goto illegal_op;
         }
6787         if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) {
6788             goto illegal_op;
         }
6789 
6790         modrm = x86_ldub_code(env, s);
6791         reg = ((modrm >> 3) & 7) | REX_R(s);
6792 
6793         if (s->prefix & PREFIX_DATA) {
6794             ot = MO_16;
6795         } else {
6796             ot = mo_64_32(dflag);
6797         }
6798 
6799         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6800         gen_extu(ot, s->T0);
6801         tcg_gen_mov_tl(cpu_cc_src, s->T0);
6802         tcg_gen_ctpop_tl(s->T0, s->T0);
6803         gen_op_mov_reg_v(s, ot, reg, s->T0);
6804 
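             /*
              * CC_OP_POPCNT derives ZF from the operand saved in cc_src
              * above; all other arithmetic flags read as zero.
              */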
6805         set_cc_op(s, CC_OP_POPCNT);
6806         break;
6807     case 0x10e ... 0x117:
6808     case 0x128 ... 0x12f:
6809     case 0x138 ... 0x13a:
6810     case 0x150 ... 0x179:
6811     case 0x17c ... 0x17f:
6812     case 0x1c2:
6813     case 0x1c4 ... 0x1c6:
6814     case 0x1d0 ... 0x1fe:
6815         disas_insn_new(s, cpu, b);
6816         break;
6817     default:
6818         goto unknown_op;
6819     }
6820     return true;
6821  illegal_op:
6822     gen_illegal_opcode(s);
6823     return true;
6824  unknown_op:
6825     gen_unknown_opcode(env, s);
6826     return true;
6827 }
6828 
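     /*
      * Allocate the TCG globals that mirror fields of CPUX86State:
      * the general-purpose registers, EIP/RIP, the condition-code
      * state, the segment bases and the MPX bound registers.
      * Generated code then refers to guest state by name instead of
      * through explicit loads and stores.
      */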
6829 void tcg_x86_init(void)
6830 {
6831     static const char reg_names[CPU_NB_REGS][4] = {
6832 #ifdef TARGET_X86_64
6833         [R_EAX] = "rax",
6834         [R_EBX] = "rbx",
6835         [R_ECX] = "rcx",
6836         [R_EDX] = "rdx",
6837         [R_ESI] = "rsi",
6838         [R_EDI] = "rdi",
6839         [R_EBP] = "rbp",
6840         [R_ESP] = "rsp",
6841         [8]  = "r8",
6842         [9]  = "r9",
6843         [10] = "r10",
6844         [11] = "r11",
6845         [12] = "r12",
6846         [13] = "r13",
6847         [14] = "r14",
6848         [15] = "r15",
6849 #else
6850         [R_EAX] = "eax",
6851         [R_EBX] = "ebx",
6852         [R_ECX] = "ecx",
6853         [R_EDX] = "edx",
6854         [R_ESI] = "esi",
6855         [R_EDI] = "edi",
6856         [R_EBP] = "ebp",
6857         [R_ESP] = "esp",
6858 #endif
6859     };
6860     static const char eip_name[] = {
6861 #ifdef TARGET_X86_64
6862         "rip"
6863 #else
6864         "eip"
6865 #endif
6866     };
6867     static const char seg_base_names[6][8] = {
6868         [R_CS] = "cs_base",
6869         [R_DS] = "ds_base",
6870         [R_ES] = "es_base",
6871         [R_FS] = "fs_base",
6872         [R_GS] = "gs_base",
6873         [R_SS] = "ss_base",
6874     };
6875     static const char bnd_regl_names[4][8] = {
6876         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6877     };
6878     static const char bnd_regu_names[4][8] = {
6879         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6880     };
6881     int i;
6882 
6883     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
6884                                        offsetof(CPUX86State, cc_op), "cc_op");
6885     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
6886                                     "cc_dst");
6887     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
6888                                     "cc_src");
6889     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
6890                                      "cc_src2");
6891     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
6892 
6893     for (i = 0; i < CPU_NB_REGS; ++i) {
6894         cpu_regs[i] = tcg_global_mem_new(tcg_env,
6895                                          offsetof(CPUX86State, regs[i]),
6896                                          reg_names[i]);
6897     }
6898 
6899     for (i = 0; i < 6; ++i) {
6900         cpu_seg_base[i]
6901             = tcg_global_mem_new(tcg_env,
6902                                  offsetof(CPUX86State, segs[i].base),
6903                                  seg_base_names[i]);
6904     }
6905 
6906     for (i = 0; i < 4; ++i) {
6907         cpu_bndl[i]
6908             = tcg_global_mem_new_i64(tcg_env,
6909                                      offsetof(CPUX86State, bnd_regs[i].lb),
6910                                      bnd_regl_names[i]);
6911         cpu_bndu[i]
6912             = tcg_global_mem_new_i64(tcg_env,
6913                                      offsetof(CPUX86State, bnd_regs[i].ub),
6914                                      bnd_regu_names[i]);
6915     }
6916 }
6917 
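     /*
      * Per-TB setup: cache the translation flags and CPUID feature
      * words in the DisasContext and allocate the temporaries used
      * during translation.
      */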
6918 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6919 {
6920     DisasContext *dc = container_of(dcbase, DisasContext, base);
6921     CPUX86State *env = cpu_env(cpu);
6922     uint32_t flags = dc->base.tb->flags;
6923     uint32_t cflags = tb_cflags(dc->base.tb);
6924     int cpl = (flags >> HF_CPL_SHIFT) & 3;
6925     int iopl = (flags >> IOPL_SHIFT) & 3;
6926 
6927     dc->cs_base = dc->base.tb->cs_base;
6928     dc->pc_save = dc->base.pc_next;
6929     dc->flags = flags;
6930 #ifndef CONFIG_USER_ONLY
6931     dc->cpl = cpl;
6932     dc->iopl = iopl;
6933 #endif
6934 
6935     /* We make some simplifying assumptions; validate they're correct. */
6936     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
6937     g_assert(CPL(dc) == cpl);
6938     g_assert(IOPL(dc) == iopl);
6939     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
6940     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
6941     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
6942     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
6943     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
6944     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
6945     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
6946     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
6947 
6948     dc->cc_op = CC_OP_DYNAMIC;
6949     dc->cc_op_dirty = false;
6950     dc->popl_esp_hack = 0;
6951     /* select the MMU index used for memory accesses */
6952     dc->mem_index = cpu_mmu_index(env, false);
6953     dc->cpuid_features = env->features[FEAT_1_EDX];
6954     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6955     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6956     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6957     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
6958     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
6959     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
6960     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
6961                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6962     /*
6963      * If jmp_opt, we want to handle each string instruction individually.
6964      * For icount also disable repz optimization so that each iteration
6965      * is accounted separately.
6966      */
6967     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
6968 
6969     dc->T0 = tcg_temp_new();
6970     dc->T1 = tcg_temp_new();
6971     dc->A0 = tcg_temp_new();
6972 
6973     dc->tmp0 = tcg_temp_new();
6974     dc->tmp1_i64 = tcg_temp_new_i64();
6975     dc->tmp2_i32 = tcg_temp_new_i32();
6976     dc->tmp3_i32 = tcg_temp_new_i32();
6977     dc->tmp4 = tcg_temp_new();
6978     dc->cc_srcT = tcg_temp_new();
6979 }
6980 
6981 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6982 {
6983 }
6984 
6985 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6986 {
6987     DisasContext *dc = container_of(dcbase, DisasContext, base);
6988     target_ulong pc_arg = dc->base.pc_next;
6989 
6990     dc->prev_insn_end = tcg_last_op();
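         /*
          * With CF_PCREL the TB may run at any virtual address, so
          * record only the offset of EIP within its guest page.
          */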
6991     if (tb_cflags(dcbase->tb) & CF_PCREL) {
6992         pc_arg -= dc->cs_base;
6993         pc_arg &= ~TARGET_PAGE_MASK;
6994     }
6995     tcg_gen_insn_start(pc_arg, dc->cc_op);
6996 }
6997 
6998 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6999 {
7000     DisasContext *dc = container_of(dcbase, DisasContext, base);
7001 
7002 #ifdef TARGET_VSYSCALL_PAGE
7003     /*
7004      * Detect entry into the vsyscall page and invoke the syscall.
7005      */
7006     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
7007         gen_exception(dc, EXCP_VSYSCALL);
7008         dc->base.pc_next = dc->pc + 1;
7009         return;
7010     }
7011 #endif
7012 
7013     if (disas_insn(dc, cpu)) {
7014         target_ulong pc_next = dc->pc;
7015         dc->base.pc_next = pc_next;
7016 
7017         if (dc->base.is_jmp == DISAS_NEXT) {
7018             if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
7019                 /*
7020                  * In single-step mode, we generate only one instruction
7021                  * and then raise an exception.
7022                  * If irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7023                  * the flag and abort the translation to give the irqs a
7024                  * chance to happen.
7025                  */
7026                 dc->base.is_jmp = DISAS_EOB_NEXT;
7027             } else if (!is_same_page(&dc->base, pc_next)) {
7028                 dc->base.is_jmp = DISAS_TOO_MANY;
7029             }
7030         }
7031     }
7032 }
7033 
7034 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7035 {
7036     DisasContext *dc = container_of(dcbase, DisasContext, base);
7037 
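         /*
          * DISAS_EOB_NEXT flushes cc_op and EIP before ending the TB,
          * whereas DISAS_EOB_ONLY assumes the instruction has already
          * brought them up to date.
          */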
7038     switch (dc->base.is_jmp) {
7039     case DISAS_NORETURN:
7040         break;
7041     case DISAS_TOO_MANY:
7042         gen_update_cc_op(dc);
7043         gen_jmp_rel_csize(dc, 0, 0);
7044         break;
7045     case DISAS_EOB_NEXT:
7046         gen_update_cc_op(dc);
7047         gen_update_eip_cur(dc);
7048         /* fall through */
7049     case DISAS_EOB_ONLY:
7050         gen_eob(dc);
7051         break;
7052     case DISAS_EOB_INHIBIT_IRQ:
7053         gen_update_cc_op(dc);
7054         gen_update_eip_cur(dc);
7055         gen_eob_inhibit_irq(dc, true);
7056         break;
7057     case DISAS_JUMP:
7058         gen_jr(dc);
7059         break;
7060     default:
7061         g_assert_not_reached();
7062     }
7063 }
7064 
7065 static void i386_tr_disas_log(const DisasContextBase *dcbase,
7066                               CPUState *cpu, FILE *logfile)
7067 {
7068     DisasContext *dc = container_of(dcbase, DisasContext, base);
7069 
7070     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7071     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7072 }
7073 
7074 static const TranslatorOps i386_tr_ops = {
7075     .init_disas_context = i386_tr_init_disas_context,
7076     .tb_start           = i386_tr_tb_start,
7077     .insn_start         = i386_tr_insn_start,
7078     .translate_insn     = i386_tr_translate_insn,
7079     .tb_stop            = i386_tr_tb_stop,
7080     .disas_log          = i386_tr_disas_log,
7081 };
7082 
7083 /* Generate intermediate code for translation block 'tb'.  */
7084 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7085                            target_ulong pc, void *host_pc)
7086 {
7087     DisasContext dc;
7088 
7089     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7090 }
7091