xref: /qemu/target/i386/tcg/translate.c (revision aa903cf3)
1 /*
2  *  i386 translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 #include "fpu/softfloat.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 #include "helper-tcg.h"
34 
35 #include "exec/log.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
41 
/* Instruction prefix bits accumulated in DisasContext.prefix.  */
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

/* Count trailing/leading zeros of a target_ulong-sized value.  */
#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

/* As above, but also match register operands (mod == 3).  */
#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv cpu_eip;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
/* Lower and upper halves of the four BNDx bound registers.  */
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
80 
/*
 * Decoder state for the x86 front end; one instance exists per
 * translation-block being translated.
 */
typedef struct DisasContext {
    DisasContextBase base;  /* common translator state */

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;  /* eip value last written to cpu_eip; -1 if unknown */

    MemOp aflag;           /* address size of the current instruction */
    MemOp dflag;           /* operand size of the current instruction */

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;  /* PREFIX_* bits decoded so far */

    bool has_modrm;  /* true when the modrm field below is valid */
    uint8_t modrm;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    /* Decoded REX prefix extension bits (see REX_R/REX_X/REX_B macros).  */
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;  /* cc_op below not yet flushed to cpu_cc_op */

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    /* NOTE(review): presumably the longjmp target for aborting decode
       of the current instruction — confirm against siglongjmp users.  */
    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;
} DisasContext;
143 
/* Translator-private DisasJumpType exit conditions.  */
#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
/* 64-bit user-only is always 32-bit-code, flat, non-vm86.  */
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif
#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif
#if defined(CONFIG_SOFTMMU) && !defined(TARGET_X86_64)
#define LMA(S)    false
#else
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#endif

/* REX prefix accessors; constants on 32-bit-only builds.  */
#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif
200 
/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

/* Forward declarations for generators defined later in this file.  */
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);
242 
/* i386 arith/logic operations (the /r "group 1" op order) */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

/* Jcc condition codes, in instruction-encoding order; bit 0 of the
   opcode selects the negated form.  */
enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

/* Bit flags describing which CC globals a CC_OP reads.  */
enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};
300 
/* Bit set if the global variable is live after setting CC_OP to X.
   Indexed by CCOp; consulted by set_cc_op() and gen_compute_eflags()
   to discard or zero values that the new CC_OP does not use.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
322 
/*
 * Record that the condition codes are now defined by OP.
 * Discards any cc_* globals the new OP no longer uses, and tracks
 * whether cpu_cc_op still needs flushing to env (cc_op_dirty).
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}
359 
/* Flush the translator's CC_OP to the cpu_cc_op global if it is stale.  */
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
367 
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

/* Byte offsets of the 8/16/32-bit sub-registers within a target_ulong
   register slot, accounting for host endianness.  */
#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
391 
392 /* In instruction encodings for byte register accesses the
393  * register number usually indicates "low 8 bits of register N";
394  * however there are some special cases where N 4..7 indicates
395  * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
396  * true for this special case, false otherwise.
397  */
398 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
399 {
400     /* Any time the REX prefix is present, byte registers are uniform */
401     if (reg < 4 || REX_PREFIX(s)) {
402         return false;
403     }
404     return true;
405 }
406 
407 /* Select the size of a push/pop operation.  */
408 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
409 {
410     if (CODE64(s)) {
411         return ot == MO_16 ? MO_16 : MO_64;
412     } else {
413         return ot;
414     }
415 }
416 
417 /* Select the size of the stack pointer.  */
418 static inline MemOp mo_stacksize(DisasContext *s)
419 {
420     return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
421 }
422 
423 /* Select only size 64 else 32.  Used for SSE operand sizes.  */
424 static inline MemOp mo_64_32(MemOp ot)
425 {
426 #ifdef TARGET_X86_64
427     return ot == MO_64 ? MO_64 : MO_32;
428 #else
429     return MO_32;
430 #endif
431 }
432 
433 /* Select size 8 if lsb of B is clear, else OT.  Used for decoding
434    byte vs word opcodes.  */
435 static inline MemOp mo_b_d(int b, MemOp ot)
436 {
437     return b & 1 ? ot : MO_8;
438 }
439 
440 /* Select size 8 if lsb of B is clear, else OT capped at 32.
441    Used for decoding operand size of port opcodes.  */
442 static inline MemOp mo_b_d32(int b, MemOp ot)
443 {
444     return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
445 }
446 
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            /* AH/CH/DH/BH: deposit into bits 15..8 of register REG-4.  */
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
488 
/* Store the OT-sized value T0 into register REG.  */
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}
493 
/* Load the OT-sized value of register REG into T0 (high byte for AH..BH).  */
static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
503 
/* Add VAL to address A0, truncating to 32 bits outside 64-bit code.  */
static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}
511 
/* Jump to DEST: EIP becomes runtime-variable, so invalidate pc_save.  */
static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}
517 
/* Add immediate VAL to register REG, writing back at size SIZE.  */
static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
524 
/* Add T0 to register REG, writing back at size SIZE.  */
static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
530 
/* Little-endian guest load of size IDX from address A0 into T0.  */
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}
535 
/* Little-endian guest store of size IDX of T0 to address A0.  */
static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}
540 
/* Write back T0 to memory at A0 when D == OR_TMP0, else to register D.  */
static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}
549 
/* Sync cpu_eip with the address of the current instruction.
   With CF_PCREL only the delta from the last known value is applied.  */
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
    }
    s->pc_save = s->base.pc_next;
}
560 
/* Sync cpu_eip with the address of the next instruction.
   With CF_PCREL only the delta from the last known value is applied.  */
static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
    }
    s->pc_save = s->pc;
}
571 
/* Length in bytes of the instruction currently being translated.  */
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}
576 
/* Current instruction length as a TCG i32 constant.  */
static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}
581 
/* Return the EIP of the next instruction as an i32 value.  */
static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}
605 
/* Return the EIP of the next instruction as a target-long value
   (a fresh temp under CF_PCREL, else a constant).  */
static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->pc - s->cs_base);
    }
}
617 
/* Return the EIP of the current instruction as a target-long value
   (a fresh temp under CF_PCREL, else a constant).  */
static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->base.pc_next - s->cs_base);
    }
}
629 
/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        /* 64-bit address: no truncation, segment base only if overridden.  */
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(s->A0, a0);
        a0 = s->A0;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            /* Non-64-bit address size in 64-bit mode: zero-extend the
               offset before adding the segment base.  */
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            /* Legacy modes: wrap the linear address to 32 bits.  */
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}
685 
/* A0 = source address for string ops: DS:ESI unless a segment override.  */
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}
690 
/* A0 = destination address for string ops: always ES:EDI, no override.  */
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}
695 
696 static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
697 {
698     tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
699     tcg_gen_shli_tl(s->T0, s->T0, ot);
700 };
701 
/* Extend SRC of size SIZE into DST, signed if SIGN else unsigned.
   Returns DST, or SRC unchanged when the size already fills target_ulong
   (so callers must use the return value, not assume DST was written).  */
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        /* Full width: nothing to do.  */
        return src;
    }
}
732 
/* Zero-extend REG in place from size OT.  */
static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}
737 
/* Sign-extend REG in place from size OT.  */
static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
742 
/* Branch to LABEL1 if the aflag-sized value of ECX satisfies COND vs 0.  */
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(s->aflag, s->tmp0);
    tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
}
749 
/* Branch to LABEL1 if (aflag-sized) ECX == 0.  */
static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}
754 
/* Branch to LABEL1 if (aflag-sized) ECX != 0.  */
static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}
759 
/* Dispatch to the size-specific I/O port input helper: V = in(N).  */
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}
776 
/* Dispatch to the size-specific I/O port output helper: out(V, N).  */
static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}
793 
/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 * Returns false only when the access is statically known to fault
 * (user-only), so the caller can skip generating the I/O itself.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    /* TSS I/O permission bitmap check, only when CPL lacks I/O privilege.  */
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        /* SVM intercept check; needs an up-to-date EIP and CC state.  */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(cpu_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}
826 
/* Generate one MOVS iteration: copy OT-sized data from SEG:ESI to ES:EDI,
   then advance ESI and EDI by the direction-flag step.  */
static void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
837 
/* Record the CC inputs for ops that only need the result (CC_DST).  */
static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}
842 
/* Record the CC inputs for two-operand ops: CC_SRC = T1, CC_DST = T0.  */
static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}
848 
/* Record the CC inputs for three-operand ops: CC_SRC2 = REG as well.  */
static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}
855 
/* Record the CC input for TEST: CC_DST = T0 & T1.  */
static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}
860 
/* Record the CC inputs for NEG: treat it as 0 - T0 under the SUB rules,
   so CC_SRC holds the negated operand and cc_srcT the zero minuend.  */
static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}
867 
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        /* All flags clear except Z and P, which a zero result sets.  */
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        /* Substitute a zero constant for any dead helper argument.  */
        zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
}
908 
/*
 * Recipe for testing one condition-code flag without materializing all of
 * EFLAGS: the flag holds when "cond(reg, reg2-or-imm)" is true, after
 * masking reg with mask.  no_setcond indicates reg already contains the
 * boolean-like value directly (presumably consumed by setcc/jcc emitters
 * later in this file — confirm against those users).
 */
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;          /* second comparison operand, if use_reg2 */
    target_ulong imm;   /* immediate operand, if !use_reg2 */
    target_ulong mask;  /* mask applied to reg; -1 for no masking */
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
918 
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* These operations never set C.  */
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        /* INC/DEC preserve C; it was saved in CC_SRC.  */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        /* BMI: C set when the (extended) source was zero.  */
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
       /* The need to compute only C from CC_OP_DYNAMIC is important
          in efficiently implementing e.g. INC at the start of a TB.  */
       gen_update_cc_op(s);
       gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                               cpu_cc_src2, cpu_cc_op);
       return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                            .mask = -1, .no_setcond = true };
    }
}
992 
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    /* P has no cheap per-CC_OP form; fall back to full EFLAGS.  */
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}
1000 
/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* These operations always clear S.  */
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            /* S is the sign bit of the size-extended result (CC_DST).  */
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}
1025 
1026 /* compute eflags.O to reg */
1027 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1028 {
1029     switch (s->cc_op) {
1030     case CC_OP_ADOX:
1031     case CC_OP_ADCOX:
1032         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1033                              .mask = -1, .no_setcond = true };
1034     case CC_OP_CLR:
1035     case CC_OP_POPCNT:
1036         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1037     default:
1038         gen_compute_eflags(s);
1039         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1040                              .mask = CC_O };
1041     }
1042 }
1043 
/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        /* cc_op unknown at translation time: materialize all flags first. */
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        /* Flags live in CC_SRC in EFLAGS layout: test the Z bit. */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        /* Result is known zero, so ZF is always set. */
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        /* ZF set iff the POPCNT source (saved in CC_SRC) was zero. */
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            /* Arithmetic op: ZF set iff CC_DST is zero at operand size. */
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
1070 
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranted not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    /* Bit 0 of the jcc opcode inverts the condition; bits 1..3 select it. */
    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            /* Unsigned <=: compare the zero-extended original operands
               (cc_srcT and CC_SRC) directly, skipping flag computation. */
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            /* Signed < / <=: same idea with sign extension. */
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            /* Below-or-equal: CF or ZF set. */
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            /* SF != OF: shift OF down onto the SF bit and XOR the two. */
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                /* Do not clobber cpu_cc_src; build the value in tmp0. */
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            /* ZF set, or SF != OF (same XOR trick as JCC_L). */
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    /* Odd opcodes are the negated form of the even ones (e.g. JNZ vs JZ). */
    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
1168 
/* Store the condition selected by jump opcode 'b' into 'reg' as 0/1. */
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        /* cc.reg already holds a boolean; EQ means it must be inverted. */
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    /* Single-bit "!= 0" test: shift the bit into position 0 and mask,
       which avoids a setcond. */
    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    /* General case: apply the mask (if any), then a setcond. */
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}
1198 
/* Compute the carry flag into 'reg' as 0/1, via the JCC_B machinery. */
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
1203 
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranted not to be used.
   Unlike gen_jcc1, this does not force the TB to end, so cc_op state
   is left as-is. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1220 
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranted not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    /* Spill cc_op to the CPU state: after the branch two paths exist,
       so the translation-time cc_op is no longer reliable.  */
    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1240 
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.
   Emit the "exit REP loop when ECX == 0" check: if ECX is non-zero,
   skip over a jump to the next instruction.  Returns label l2, placed
   just before that jump, so callers can branch back to terminate the
   string loop. */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);   /* jump to the next instruction */
    gen_set_label(l1);
    return l2;
}
1253 
/* STOS: store AL/AX/EAX/RAX at [EDI], then advance EDI by the
   direction-flag-scaled element size (Dshift). */
static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
1262 
/* LODS: load from [ESI] into AL/AX/EAX/RAX, then advance ESI. */
static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}
1271 
/* SCAS: compare AL/AX/EAX/RAX against the value at [EDI] (loaded into
   T1, compared via OP_CMPL), then advance EDI. */
static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
1280 
/* CMPS: compare the value at [ESI] with the value at [EDI] (loaded into
   T1 first; OR_TMP0 makes OP_CMPL load its left operand from [ESI]),
   then advance both ESI and EDI. */
static void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
1291 
/* If I/O breakpoints are armed (HF_IOBPT), call the bpt_io helper with
   the port, the access size in bytes, and the next instruction's EIP. */
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
1305 
/* INS: read from port DX and store at [EDI], then advance EDI and
   check I/O breakpoints. */
static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    /* Port number is the low 16 bits of EDX. */
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}
1321 
/* OUTS: load from [ESI] and write to port DX, then advance ESI and
   check I/O breakpoints. */
static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    /* Port number is the low 16 bits of EDX. */
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}
1335 
/* Generate jumps to current or next instruction */

/* Emit one iteration of a REP-prefixed string instruction: exit if
   ECX == 0, perform 'fn' once, decrement ECX, optionally re-check ECX,
   then loop by jumping back to the instruction itself. */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /* Jump back to the start of the current instruction. */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
1354 
/* Define gen_repz_<op>(), the REP wrapper around gen_<op>(). */
#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }
1358 
/* Like gen_repz, but for REPZ/REPNZ-terminated instructions (SCAS/CMPS):
   after each iteration, also exit the loop when ZF does not match 'nz'. */
static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    /* Exit to the next instruction if ZF == (nz ^ 1). */
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
1374 
/* Define gen_repz_<op>(), the REPZ/REPNZ wrapper around gen_<op>(). */
#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

/* Instantiate the REP wrappers for each string instruction. */
GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
1386 
1387 static void gen_helper_fp_arith_ST0_FT0(int op)
1388 {
1389     switch (op) {
1390     case 0:
1391         gen_helper_fadd_ST0_FT0(cpu_env);
1392         break;
1393     case 1:
1394         gen_helper_fmul_ST0_FT0(cpu_env);
1395         break;
1396     case 2:
1397         gen_helper_fcom_ST0_FT0(cpu_env);
1398         break;
1399     case 3:
1400         gen_helper_fcom_ST0_FT0(cpu_env);
1401         break;
1402     case 4:
1403         gen_helper_fsub_ST0_FT0(cpu_env);
1404         break;
1405     case 5:
1406         gen_helper_fsubr_ST0_FT0(cpu_env);
1407         break;
1408     case 6:
1409         gen_helper_fdiv_ST0_FT0(cpu_env);
1410         break;
1411     case 7:
1412         gen_helper_fdivr_ST0_FT0(cpu_env);
1413         break;
1414     }
1415 }
1416 
1417 /* NOTE the exception in "r" op ordering */
1418 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1419 {
1420     TCGv_i32 tmp = tcg_constant_i32(opreg);
1421     switch (op) {
1422     case 0:
1423         gen_helper_fadd_STN_ST0(cpu_env, tmp);
1424         break;
1425     case 1:
1426         gen_helper_fmul_STN_ST0(cpu_env, tmp);
1427         break;
1428     case 4:
1429         gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1430         break;
1431     case 5:
1432         gen_helper_fsub_STN_ST0(cpu_env, tmp);
1433         break;
1434     case 6:
1435         gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1436         break;
1437     case 7:
1438         gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1439         break;
1440     }
1441 }
1442 
/* Raise exception 'trapno' at the current instruction.  cc_op and EIP
   are synced to the CPU state first so the exception sees a consistent
   machine state; translation of this block then stops. */
static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}
1450 
/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}
1457 
/* Generate #GP (general protection fault) for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}
1463 
1464 /* Check for cpl == 0; if not, raise #GP and return false. */
1465 static bool check_cpl0(DisasContext *s)
1466 {
1467     if (CPL(s) == 0) {
1468         return true;
1469     }
1470     gen_exception_gpf(s);
1471     return false;
1472 }
1473 
1474 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1475 static bool check_vm86_iopl(DisasContext *s)
1476 {
1477     if (!VM86(s) || IOPL(s) == 3) {
1478         return true;
1479     }
1480     gen_exception_gpf(s);
1481     return false;
1482 }
1483 
1484 /* Check for iopl allowing access; if not, raise #GP and return false. */
1485 static bool check_iopl(DisasContext *s)
1486 {
1487     if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
1488         return true;
1489     }
1490     gen_exception_gpf(s);
1491     return false;
1492 }
1493 
/* if d == OR_TMP0, it means memory operand (address in A0).
   Emit one ALU operation 'op' of size 'ot' with destination 'd' and
   second operand in T1.  With PREFIX_LOCK the memory forms use atomic
   read-modify-write TCG ops; otherwise load/op/store.  Updates the
   lazy-flag state (CC_*) and cc_op accordingly. */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory.  */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        /* Non-atomic memory form: load now; atomic forms load inside
           the atomic op below. */
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch(op) {
    case OP_ADCL:
        /* dst = dst + src + CF */
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        /* dst = dst - src - CF, implemented atomically as
           dst += -(src + CF). */
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            /* Atomic subtract via fetch_add of the negated operand;
               the original value is captured in cc_srcT for flags. */
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        /* Like SUB but discards the result: only CC_* are written. */
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
1604 
/* if d == OR_TMP0, it means memory operand (address in A0).
   Emit INC (c > 0) or DEC (c <= 0) of size 'ot'.  CF is preserved:
   it is computed from the previous op into CC_SRC before cc_op is
   switched to the INC/DEC variant. */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    /* INC/DEC leave CF untouched: save the old carry into CC_SRC. */
    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}
1631 
/* Update CC_DST/CC_SRC/CC_OP after a variable-count shift.  When the
   count is zero, x86 shifts leave the flags unchanged, so every store
   is done with a movcond on 'count'. */
static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
1673 
/* Variable-count SHL/SHR/SAR with the count in T1.  op1 selects the
   destination (OR_TMP0 = memory at A0).  tmp0 receives the value
   shifted by count-1, whose low/high bit becomes CC_SRC for the lazy
   carry computation. */
static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    /* The count is truncated to 5 bits (6 for 64-bit operands). */
    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* Flags must be left unchanged when the count is zero; handled by
       gen_shift_flags via movcond. */
    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}
1709 
1710 static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1711                             int is_right, int is_arith)
1712 {
1713     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1714 
1715     /* load */
1716     if (op1 == OR_TMP0)
1717         gen_op_ld_v(s, ot, s->T0, s->A0);
1718     else
1719         gen_op_mov_v_reg(s, ot, s->T0, op1);
1720 
1721     op2 &= mask;
1722     if (op2 != 0) {
1723         if (is_right) {
1724             if (is_arith) {
1725                 gen_exts(ot, s->T0);
1726                 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
1727                 tcg_gen_sari_tl(s->T0, s->T0, op2);
1728             } else {
1729                 gen_extu(ot, s->T0);
1730                 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
1731                 tcg_gen_shri_tl(s->T0, s->T0, op2);
1732             }
1733         } else {
1734             tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
1735             tcg_gen_shli_tl(s->T0, s->T0, op2);
1736         }
1737     }
1738 
1739     /* store */
1740     gen_op_st_rm_T0_A0(s, ot, op1);
1741 
1742     /* update eflags if non zero shift */
1743     if (op2 != 0) {
1744         tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1745         tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1746         set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1747     }
1748 }
1749 
/* Variable-count ROL/ROR with the count in T1.  8/16-bit inputs are
   replicated so a 32-bit rotate gives the right result. */
static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    /* The count is truncated to 5 bits (6 for 64-bit operands). */
    tcg_gen_andi_tl(s->T1, s->T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(s->T0, s->T0);
        tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        if (is_right) {
            tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        } else {
            tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        }
        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
        } else {
            tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_constant_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, s->T1);
    tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        s->tmp2_i32, s->tmp3_i32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
1832 
/* Immediate-count ROL/ROR.  Unlike the variable-count form, the count
   is known at translation time, so flag updates are emitted only when
   the masked count is non-zero. */
static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            if (is_right) {
                tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(s->T0, s->T0, op2);
            } else {
                tcg_gen_rotli_tl(s->T0, s->T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            /* Narrow rotate, done by hand as shift-left | shift-right;
               a right rotate by N is a left rotate by width - N. */
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, s->T0);
            tcg_gen_shli_tl(s->tmp0, s->T0, shift);
            tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
            tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
1909 
1910 /* XXX: add faster immediate = 1 case */
1911 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1912                            int is_right)
1913 {
1914     gen_compute_eflags(s);
1915     assert(s->cc_op == CC_OP_EFLAGS);
1916 
1917     /* load */
1918     if (op1 == OR_TMP0)
1919         gen_op_ld_v(s, ot, s->T0, s->A0);
1920     else
1921         gen_op_mov_v_reg(s, ot, s->T0, op1);
1922 
1923     if (is_right) {
1924         switch (ot) {
1925         case MO_8:
1926             gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
1927             break;
1928         case MO_16:
1929             gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
1930             break;
1931         case MO_32:
1932             gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
1933             break;
1934 #ifdef TARGET_X86_64
1935         case MO_64:
1936             gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
1937             break;
1938 #endif
1939         default:
1940             g_assert_not_reached();
1941         }
1942     } else {
1943         switch (ot) {
1944         case MO_8:
1945             gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
1946             break;
1947         case MO_16:
1948             gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
1949             break;
1950         case MO_32:
1951             gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
1952             break;
1953 #ifdef TARGET_X86_64
1954         case MO_64:
1955             gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
1956             break;
1957 #endif
1958         default:
1959             g_assert_not_reached();
1960         }
1961     }
1962     /* store */
1963     gen_op_st_rm_T0_A0(s, ot, op1);
1964 }
1965 
/* XXX: add faster immediate case */
/*
 * Double-precision shift (SHLD/SHRD): shift T0 by a variable count,
 * shifting bits in from T1.  OP1 selects the destination (OR_TMP0 =
 * memory at A0, else a register); IS_RIGHT selects SHRD vs SHLD.
 * COUNT_IN is masked to 5 bits (6 for 64-bit operands) per the ISA.
 */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 defined then fall through into MO_32 case,
         * otherwise fall through default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            /* tmp0 = last bit shifted out, for CF computation below.  */
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            /* For a left shift the result lands in the high half.  */
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        /* tmp0 = value shifted by count-1, i.e. the last bit out.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        /* If count == 0, T1 must not contribute to the result.  */
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}
2049 
2050 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2051 {
2052     if (s != OR_TMP1)
2053         gen_op_mov_v_reg(s1, ot, s1->T1, s);
2054     switch(op) {
2055     case OP_ROL:
2056         gen_rot_rm_T1(s1, ot, d, 0);
2057         break;
2058     case OP_ROR:
2059         gen_rot_rm_T1(s1, ot, d, 1);
2060         break;
2061     case OP_SHL:
2062     case OP_SHL1:
2063         gen_shift_rm_T1(s1, ot, d, 0, 0);
2064         break;
2065     case OP_SHR:
2066         gen_shift_rm_T1(s1, ot, d, 1, 0);
2067         break;
2068     case OP_SAR:
2069         gen_shift_rm_T1(s1, ot, d, 1, 1);
2070         break;
2071     case OP_RCL:
2072         gen_rotc_rm_T1(s1, ot, d, 0);
2073         break;
2074     case OP_RCR:
2075         gen_rotc_rm_T1(s1, ot, d, 1);
2076         break;
2077     }
2078 }
2079 
2080 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2081 {
2082     switch(op) {
2083     case OP_ROL:
2084         gen_rot_rm_im(s1, ot, d, c, 0);
2085         break;
2086     case OP_ROR:
2087         gen_rot_rm_im(s1, ot, d, c, 1);
2088         break;
2089     case OP_SHL:
2090     case OP_SHL1:
2091         gen_shift_rm_im(s1, ot, d, c, 0, 0);
2092         break;
2093     case OP_SHR:
2094         gen_shift_rm_im(s1, ot, d, c, 1, 0);
2095         break;
2096     case OP_SAR:
2097         gen_shift_rm_im(s1, ot, d, c, 1, 1);
2098         break;
2099     default:
2100         /* currently not optimized */
2101         tcg_gen_movi_tl(s1->T1, c);
2102         gen_shift(s1, op, ot, d, OR_TMP1);
2103         break;
2104     }
2105 }
2106 
/* Architectural limit: an x86 instruction is at most 15 bytes long.  */
#define X86_MAX_INSN_LENGTH 15

/*
 * Advance s->pc by NUM_BYTES and return the pre-advance value.  May not
 * return normally: siglongjmps to s->jmpbuf with value 2 when a
 * subsequent insn crosses a page boundary, or with value 1 when the
 * insn exceeds X86_MAX_INSN_LENGTH.
 */
static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            /* Probe the second page; may fault instead of returning.  */
            volatile uint8_t unused =
                cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
            (void) unused;
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
2136 
/* Fetch one code byte and advance the decode position.  */
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}
2141 
/* Fetch a 16-bit code word; sign extension happens via the int16_t
   return type.  */
static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}
2146 
/* Fetch a 16-bit code word, zero-extended.  */
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}
2151 
/* Fetch a 32-bit code word.  */
static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}
2156 
#ifdef TARGET_X86_64
/* Fetch a 64-bit code word (64-bit targets only).  */
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
2163 
/* Decompose an address.  */

/*
 * A decoded x86 effective address.  BASE and INDEX are register
 * numbers, or -1 when absent; BASE == -2 marks a RIP-relative address.
 * SCALE is the log2 multiplier applied to the index register.
 */
typedef struct AddressParts {
    int def_seg;        /* default segment (R_DS or R_SS) */
    int base;
    int index;
    int scale;
    target_long disp;   /* sign-extended displacement */
} AddressParts;
2173 
/*
 * Decode the ModRM byte, plus any SIB byte and displacement that
 * follow it, into an AddressParts.  Consumes the extra instruction
 * bytes from the code stream as needed.
 */
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        /* rm == 4 means a SIB byte follows the ModRM byte.  */
        if (rm == 4) {
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        /* Displacement size is selected by the mod field.  */
        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                /* disp32 with no base; RIP-relative in 64-bit mode
                   when no SIB byte was present.  */
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        /* EBP/ESP based addresses default to the stack segment.  */
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                /* disp16 with no base register.  */
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        /* 16-bit addressing uses fixed base/index register pairs.  */
        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
2296 
/* Compute the address, with a minimum number of TCG ops.  */
/*
 * Returns either a cpu_regs[] value directly (when no arithmetic is
 * needed) or s->A0 holding the computed address.  When IS_VSIB, the
 * index register is not added here.
 */
static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
{
    TCGv ea = NULL;

    if (a.index >= 0 && !is_vsib) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
            ea = s->A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
            ea = s->A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (!ea) {
        /* No base or index: absolute or RIP-relative address.  */
        if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
            /* With cpu_eip ~= pc_save, the expression is pc-relative. */
            tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
        } else {
            tcg_gen_movi_tl(s->A0, a.disp);
        }
        ea = s->A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(s->A0, ea, a.disp);
        ea = s->A0;
    }

    return ea;
}
2331 
2332 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2333 {
2334     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2335     TCGv ea = gen_lea_modrm_1(s, a, false);
2336     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2337 }
2338 
/* Decode and discard a ModRM operand without emitting any code: the
   addressing-mode bytes still have to be consumed (e.g. multi-byte
   NOP).  */
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    (void)gen_lea_modrm_0(env, s, modrm);
}
2343 
/* Used for BNDCL, BNDCU, BNDCN.  */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    /* Compute the effective address being bounds-checked.  */
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);

    tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
    if (!CODE64(s)) {
        /* Outside 64-bit mode only the low 32 address bits count.  */
        tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
    }
    /* Compare against the bound and hand the result to the helper.  */
    tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
    gen_helper_bndck(cpu_env, s->tmp2_i32);
}
2359 
/* used for LEA and MOV AX, mem */
/* Apply the default DS segment (or any override) to the address in A0. */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
}
2365 
2366 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2367    OR_TMP0 */
2368 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2369                            MemOp ot, int reg, int is_store)
2370 {
2371     int mod, rm;
2372 
2373     mod = (modrm >> 6) & 3;
2374     rm = (modrm & 7) | REX_B(s);
2375     if (mod == 3) {
2376         if (is_store) {
2377             if (reg != OR_TMP0)
2378                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2379             gen_op_mov_reg_v(s, ot, rm, s->T0);
2380         } else {
2381             gen_op_mov_v_reg(s, ot, s->T0, rm);
2382             if (reg != OR_TMP0)
2383                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2384         }
2385     } else {
2386         gen_lea_modrm(env, s, modrm);
2387         if (is_store) {
2388             if (reg != OR_TMP0)
2389                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2390             gen_op_st_v(s, ot, s->T0, s->A0);
2391         } else {
2392             gen_op_ld_v(s, ot, s->T0, s->A0);
2393             if (reg != OR_TMP0)
2394                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2395         }
2396     }
2397 }
2398 
/* Fetch an immediate address of size OT from the instruction stream,
   zero-extended to target_ulong.  */
static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_ulong ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}
2423 
/* Fetch an immediate operand of size OT, zero-extended to 32 bits.
   64-bit operands read a 32-bit immediate (the x86 encoding never has
   a full 64-bit immediate here).  */
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = x86_ldl_code(env, s);
        break;
    default:
        g_assert_not_reached();
    }
    return ret;
}
2446 
/* Fetch an immediate operand of size OT, sign-extended to target_long. */
static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_long ret;

    switch (ot) {
    case MO_8:
        ret = (int8_t) x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = (int16_t) x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = (int32_t) x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}
2471 
2472 static inline int insn_const_size(MemOp ot)
2473 {
2474     if (ot <= MO_32) {
2475         return 1 << ot;
2476     } else {
2477         return 4;
2478     }
2479 }
2480 
/* Conditional jump: branch to eip+DIFF when condition B holds.  */
static void gen_jcc(DisasContext *s, int b, int diff)
{
    TCGLabel *l1 = gen_new_label();

    gen_jcc1(s, b, l1);
    /* Not taken: fall through to the next instruction.  */
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    /* Taken.  */
    gen_jmp_rel(s, s->dflag, diff, 0);
}
2490 
/* Generate CMOVcc: copy the r/m operand into REG when condition B
   holds.  Note the source is loaded unconditionally, as on hardware.  */
static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b,
                        int modrm, int reg)
{
    CCPrepare cc;

    /* Load the source operand into T0.  */
    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

    cc = gen_prepare_cc(s, b, s->T1);
    if (cc.mask != -1) {
        /* Isolate the bits the condition actually tests.  */
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_constant_tl(cc.imm);
    }

    /* REG = cond ? T0 : REG.  */
    tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2,
                       s->T0, cpu_regs[reg]);
    gen_op_mov_reg_v(s, ot, reg, s->T0);
}
2512 
/* Load the selector of SEG_REG into T0, zero-extended.  */
static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ld32u_tl(s->T0, cpu_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}
2518 
/* Real/VM86-mode segment load: store the 16-bit selector from T0 and
   derive the segment base as selector << 4, with no descriptor load.  */
static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ext16u_tl(s->T0, s->T0);
    tcg_gen_st32_tl(s->T0, cpu_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
}
2526 
/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
{
    if (PE(s) && !VM86(s)) {
        /* Protected mode: full descriptor load with checks via helper. */
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        } else if (CODE32(s) && seg_reg < R_FS) {
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
    } else {
        /* Real or VM86 mode: simple selector << 4 base computation.  */
        gen_op_movl_seg_T0_vm(s, seg_reg);
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        }
    }
}
2550 
/* Emit an SVM intercept check for TYPE when running under a guest.  */
static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
    /* no SVM activated; fast case */
    if (likely(!GUEST(s))) {
        return;
    }
    gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
}
2559 
/* Adjust ESP/RSP by ADDEND, at the current stack address size.  */
static inline void gen_stack_update(DisasContext *s, int addend)
{
    gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
}
2564 
/* Generate a push. It depends on ss32, addseg and dflag.  */
static void gen_push_v(DisasContext *s, TCGv val)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);   /* data (operand) size */
    MemOp a_ot = mo_stacksize(s);           /* stack address size */
    int size = 1 << d_ot;
    TCGv new_esp = s->A0;

    tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);

    if (!CODE64(s)) {
        if (ADDSEG(s)) {
            /* A0 is about to gain the SS base; keep the raw ESP value. */
            new_esp = s->tmp4;
            tcg_gen_mov_tl(new_esp, s->A0);
        }
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
    }

    /* Store first, update ESP after, so a faulting store leaves ESP
       unchanged.  */
    gen_op_st_v(s, d_ot, val, s->A0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
}
2586 
/* two step pop is necessary for precise exceptions */
/* Load the stack top into T0 without adjusting ESP; the caller commits
   the pop via gen_pop_update once the value is safely consumed.  */
static MemOp gen_pop_T0(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    return d_ot;
}
2597 
/* Second half of a pop: advance ESP past the popped datum.  */
static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
    gen_stack_update(s, 1 << ot);
}
2602 
/* Compute the SS-relative address of the stack top into A0.  */
static inline void gen_stack_A0(DisasContext *s)
{
    gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
2607 
/* Generate PUSHA/PUSHAD: push all eight general registers.  */
static void gen_pusha(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;   /* stack address size */
    MemOp d_ot = s->dflag;                  /* data size */
    int size = 1 << d_ot;
    int i;

    /* Store regs 7..0 into the eight slots below ESP, then commit ESP
       once at the end (precise exceptions: a fault leaves ESP intact). */
    for (i = 0; i < 8; i++) {
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
    }

    gen_stack_update(s, -8 * size);
}
2623 
/* Generate POPA/POPAD: pop all general registers except ESP.  */
static void gen_popa(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;   /* stack address size */
    MemOp d_ot = s->dflag;                  /* data size */
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (7 - i == R_ESP) {
            continue;
        }
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_ld_v(s, d_ot, s->T0, s->A0);
        gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
    }

    /* Commit ESP once, after all loads have succeeded.  */
    gen_stack_update(s, 8 * size);
}
2644 
/* Generate ENTER: push EBP, optionally copy LEVEL frame pointers from
   the enclosing frame, then allocate ESP_ADDEND bytes of locals.  */
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1.  */
    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
    gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

    /* The architectural nesting level is taken modulo 32.  */
    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame.  */
        for (i = 1; i < level; ++i) {
            tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_ld_v(s, d_ot, s->tmp0, s->A0);

            tcg_gen_subi_tl(s->A0, s->T1, size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_st_v(s, d_ot, s->tmp0, s->A0);
        }

        /* Push the current FrameTemp as the last level.  */
        tcg_gen_subi_tl(s->A0, s->T1, size * level);
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, s->T1, s->A0);
    }

    /* Copy the FrameTemp value to EBP.  */
    gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);

    /* Compute the final value of ESP.  */
    tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
2684 
/* Generate LEAVE: reload EBP from the frame base and deallocate the
   frame (ESP = old EBP + operand size).  */
static void gen_leave(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);

    /* Load the saved EBP from the frame base.  */
    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    /* New ESP points just past the saved EBP slot.  */
    tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);

    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
2698 
/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream.  */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    /* With LOG_UNIMP enabled, also log the bytes that failed to decode. */
    if (qemu_loglevel_mask(LOG_UNIMP)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            /* Dump every byte consumed so far for this instruction.  */
            target_ulong pc = s->base.pc_next, end = s->pc;

            fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
2720 
/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno)
{
    /* Flags and EIP must be up to date before entering the helper.  */
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    /* The helper also receives the length of this instruction.  */
    gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno),
                               cur_insn_len_i32(s));
    s->base.is_jmp = DISAS_NORETURN;
}
2731 
/* Set MASK bits in env->hflags and in the s->flags shadow; no code is
   emitted when the translation-time copy shows them already set.  */
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_gen_ori_i32(t, t, mask);
        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        s->flags |= mask;
    }
}
2742 
/* Clear MASK bits in env->hflags and in the s->flags shadow; no code is
   emitted when the translation-time copy shows them already clear.  */
static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if (s->flags & mask) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_gen_andi_i32(t, t, ~mask);
        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        s->flags &= ~mask;
    }
}
2753 
/* OR MASK into env->eflags.  (S is unused but kept for symmetry with
   the other flag-update helpers.)  */
static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(t, t, mask);
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
}
2762 
/* Clear MASK bits in env->eflags.  (S is unused but kept for symmetry
   with the other flag-update helpers.)  */
static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(t, t, ~mask);
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
}
2771 
/* Clear BND registers during legacy branches.  */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime.  */
    if ((s->prefix & PREFIX_REPNZ) == 0    /* no BND (F2) prefix */
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(cpu_env);
    }
}
2784 
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns.
   If JR, exit via an indirect jump (lookup_and_goto_ptr) instead of a
   plain exit to the main loop.  */
static void
do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it.  */
    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    } else {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (recheck_tf) {
        gen_helper_rechecking_single_step(cpu_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->flags & HF_TF_MASK) {
        /* Trap flag set: raise the single-step exception via helper.  */
        gen_helper_single_step(cpu_env);
    } else if (jr) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
2816 
/* As do_gen_eob_worker, without the jump-to-register fast path.  */
static inline void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}
2822 
/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}
2829 
/* End of block, resetting the inhibit irq flag.  */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}
2835 
/* Jump to register */
/* End of block via an indirect jump (TB lookup on the new EIP).  */
static void gen_jr(DisasContext *s)
{
    do_gen_eob_worker(s, false, false, true);
}
2841 
/* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                /* Extra masking is needed in this case; see below.  */
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_eip &= mask;

    /* Flags must be resolved before leaving the TB.  */
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    }

    if (use_goto_tb &&
        translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_jr(s);   /* jump to another page */
        } else {
            gen_eob(s);  /* exit to main loop */
        }
    }
}
2899 
2900 /* Jump to eip+diff, truncating to the current code size. */
2901 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2902 {
2903     /* CODE64 ignores the OT argument, so we need not consider it. */
2904     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2905 }
2906 
2907 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2908 {
2909     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2910     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
2911 }
2912 
2913 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2914 {
2915     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
2916     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2917 }
2918 
2919 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2920 {
2921     int mem_index = s->mem_index;
2922     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2923                         MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2924     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2925     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2926     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2927     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2928 }
2929 
2930 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2931 {
2932     int mem_index = s->mem_index;
2933     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2934     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2935                         MO_LEUQ | (align ? MO_ALIGN_16 : 0));
2936     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2937     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2938     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2939 }
2940 
2941 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2942 {
2943     int mem_index = s->mem_index;
2944     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
2945                         MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2946     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
2947     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2948     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2949     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
2950 
2951     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2952     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2953     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
2954     tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2955     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2956     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
2957 }
2958 
2959 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2960 {
2961     int mem_index = s->mem_index;
2962     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
2963     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
2964                         MO_LEUQ | (align ? MO_ALIGN_32 : 0));
2965     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
2966     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
2967     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2968     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2969     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
2970     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2971     tcg_gen_addi_tl(s->tmp0, s->A0, 24);
2972     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
2973     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
2974 }
2975 
2976 #include "decode-new.h"
2977 #include "emit.c.inc"
2978 #include "decode-new.c.inc"
2979 
/*
 * Generate code for CMPXCHG8B: compare EDX:EAX with the 64-bit memory
 * operand; on match store ECX:EBX to memory, otherwise load the old
 * memory value into EDX:EAX.  Only the Z flag is updated.
 */
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    /* Compute the effective address of the m64 operand into s->A0. */
    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Compute the required value of Z: 1 iff old == expected. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        /* Write back EAX/EDX only when Z == 0 (comparison failed). */
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
3032 
#ifdef TARGET_X86_64
/*
 * Generate code for CMPXCHG16B: compare RDX:RAX with the 128-bit,
 * 16-byte-aligned memory operand; on match store RCX:RBX, otherwise
 * load the old memory value into RDX:RAX.  Only the Z flag is updated.
 */
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    /* MO_ALIGN: CMPXCHG16B requires a 16-byte-aligned operand. */
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    /* Compute the effective address of the m128 operand into s->A0. */
    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    /* val now holds the old memory value; split it into T1:T0. */
    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
#endif
3077 
3078 /* convert one instruction. s->base.is_jmp is set if the translation must
3079    be stopped. Return the next pc value */
3080 static bool disas_insn(DisasContext *s, CPUState *cpu)
3081 {
3082     CPUX86State *env = cpu->env_ptr;
3083     int b, prefixes;
3084     int shift;
3085     MemOp ot, aflag, dflag;
3086     int modrm, reg, rm, mod, op, opreg, val;
3087     bool orig_cc_op_dirty = s->cc_op_dirty;
3088     CCOp orig_cc_op = s->cc_op;
3089     target_ulong orig_pc_save = s->pc_save;
3090 
3091     s->pc = s->base.pc_next;
3092     s->override = -1;
3093 #ifdef TARGET_X86_64
3094     s->rex_r = 0;
3095     s->rex_x = 0;
3096     s->rex_b = 0;
3097 #endif
3098     s->rip_offset = 0; /* for relative ip address */
3099     s->vex_l = 0;
3100     s->vex_v = 0;
3101     s->vex_w = false;
3102     switch (sigsetjmp(s->jmpbuf, 0)) {
3103     case 0:
3104         break;
3105     case 1:
3106         gen_exception_gpf(s);
3107         return true;
3108     case 2:
3109         /* Restore state that may affect the next instruction. */
3110         s->pc = s->base.pc_next;
3111         /*
3112          * TODO: These save/restore can be removed after the table-based
3113          * decoder is complete; we will be decoding the insn completely
3114          * before any code generation that might affect these variables.
3115          */
3116         s->cc_op_dirty = orig_cc_op_dirty;
3117         s->cc_op = orig_cc_op;
3118         s->pc_save = orig_pc_save;
3119         /* END TODO */
3120         s->base.num_insns--;
3121         tcg_remove_ops_after(s->prev_insn_end);
3122         s->base.is_jmp = DISAS_TOO_MANY;
3123         return false;
3124     default:
3125         g_assert_not_reached();
3126     }
3127 
3128     prefixes = 0;
3129 
3130  next_byte:
3131     s->prefix = prefixes;
3132     b = x86_ldub_code(env, s);
3133     /* Collect prefixes.  */
3134     switch (b) {
3135     default:
3136         break;
3137     case 0x0f:
3138         b = x86_ldub_code(env, s) + 0x100;
3139         break;
3140     case 0xf3:
3141         prefixes |= PREFIX_REPZ;
3142         prefixes &= ~PREFIX_REPNZ;
3143         goto next_byte;
3144     case 0xf2:
3145         prefixes |= PREFIX_REPNZ;
3146         prefixes &= ~PREFIX_REPZ;
3147         goto next_byte;
3148     case 0xf0:
3149         prefixes |= PREFIX_LOCK;
3150         goto next_byte;
3151     case 0x2e:
3152         s->override = R_CS;
3153         goto next_byte;
3154     case 0x36:
3155         s->override = R_SS;
3156         goto next_byte;
3157     case 0x3e:
3158         s->override = R_DS;
3159         goto next_byte;
3160     case 0x26:
3161         s->override = R_ES;
3162         goto next_byte;
3163     case 0x64:
3164         s->override = R_FS;
3165         goto next_byte;
3166     case 0x65:
3167         s->override = R_GS;
3168         goto next_byte;
3169     case 0x66:
3170         prefixes |= PREFIX_DATA;
3171         goto next_byte;
3172     case 0x67:
3173         prefixes |= PREFIX_ADR;
3174         goto next_byte;
3175 #ifdef TARGET_X86_64
3176     case 0x40 ... 0x4f:
3177         if (CODE64(s)) {
3178             /* REX prefix */
3179             prefixes |= PREFIX_REX;
3180             s->vex_w = (b >> 3) & 1;
3181             s->rex_r = (b & 0x4) << 1;
3182             s->rex_x = (b & 0x2) << 2;
3183             s->rex_b = (b & 0x1) << 3;
3184             goto next_byte;
3185         }
3186         break;
3187 #endif
3188     case 0xc5: /* 2-byte VEX */
3189     case 0xc4: /* 3-byte VEX */
3190         if (CODE32(s) && !VM86(s)) {
3191             int vex2 = x86_ldub_code(env, s);
3192             s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
3193 
3194             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3195                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3196                    otherwise the instruction is LES or LDS.  */
3197                 break;
3198             }
3199             disas_insn_new(s, cpu, b);
3200             return s->pc;
3201         }
3202         break;
3203     }
3204 
3205     /* Post-process prefixes.  */
3206     if (CODE64(s)) {
3207         /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
3208            data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3209            over 0x66 if both are present.  */
3210         dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3211         /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
3212         aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3213     } else {
3214         /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
3215         if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3216             dflag = MO_32;
3217         } else {
3218             dflag = MO_16;
3219         }
3220         /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
3221         if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3222             aflag = MO_32;
3223         }  else {
3224             aflag = MO_16;
3225         }
3226     }
3227 
3228     s->prefix = prefixes;
3229     s->aflag = aflag;
3230     s->dflag = dflag;
3231 
3232     /* now check op code */
3233     switch (b) {
3234         /**************************/
3235         /* arith & logic */
3236     case 0x00 ... 0x05:
3237     case 0x08 ... 0x0d:
3238     case 0x10 ... 0x15:
3239     case 0x18 ... 0x1d:
3240     case 0x20 ... 0x25:
3241     case 0x28 ... 0x2d:
3242     case 0x30 ... 0x35:
3243     case 0x38 ... 0x3d:
3244         {
3245             int op, f, val;
3246             op = (b >> 3) & 7;
3247             f = (b >> 1) & 3;
3248 
3249             ot = mo_b_d(b, dflag);
3250 
3251             switch(f) {
3252             case 0: /* OP Ev, Gv */
3253                 modrm = x86_ldub_code(env, s);
3254                 reg = ((modrm >> 3) & 7) | REX_R(s);
3255                 mod = (modrm >> 6) & 3;
3256                 rm = (modrm & 7) | REX_B(s);
3257                 if (mod != 3) {
3258                     gen_lea_modrm(env, s, modrm);
3259                     opreg = OR_TMP0;
3260                 } else if (op == OP_XORL && rm == reg) {
3261                 xor_zero:
3262                     /* xor reg, reg optimisation */
3263                     set_cc_op(s, CC_OP_CLR);
3264                     tcg_gen_movi_tl(s->T0, 0);
3265                     gen_op_mov_reg_v(s, ot, reg, s->T0);
3266                     break;
3267                 } else {
3268                     opreg = rm;
3269                 }
3270                 gen_op_mov_v_reg(s, ot, s->T1, reg);
3271                 gen_op(s, op, ot, opreg);
3272                 break;
3273             case 1: /* OP Gv, Ev */
3274                 modrm = x86_ldub_code(env, s);
3275                 mod = (modrm >> 6) & 3;
3276                 reg = ((modrm >> 3) & 7) | REX_R(s);
3277                 rm = (modrm & 7) | REX_B(s);
3278                 if (mod != 3) {
3279                     gen_lea_modrm(env, s, modrm);
3280                     gen_op_ld_v(s, ot, s->T1, s->A0);
3281                 } else if (op == OP_XORL && rm == reg) {
3282                     goto xor_zero;
3283                 } else {
3284                     gen_op_mov_v_reg(s, ot, s->T1, rm);
3285                 }
3286                 gen_op(s, op, ot, reg);
3287                 break;
3288             case 2: /* OP A, Iv */
3289                 val = insn_get(env, s, ot);
3290                 tcg_gen_movi_tl(s->T1, val);
3291                 gen_op(s, op, ot, OR_EAX);
3292                 break;
3293             }
3294         }
3295         break;
3296 
3297     case 0x82:
3298         if (CODE64(s))
3299             goto illegal_op;
3300         /* fall through */
3301     case 0x80: /* GRP1 */
3302     case 0x81:
3303     case 0x83:
3304         {
3305             int val;
3306 
3307             ot = mo_b_d(b, dflag);
3308 
3309             modrm = x86_ldub_code(env, s);
3310             mod = (modrm >> 6) & 3;
3311             rm = (modrm & 7) | REX_B(s);
3312             op = (modrm >> 3) & 7;
3313 
3314             if (mod != 3) {
3315                 if (b == 0x83)
3316                     s->rip_offset = 1;
3317                 else
3318                     s->rip_offset = insn_const_size(ot);
3319                 gen_lea_modrm(env, s, modrm);
3320                 opreg = OR_TMP0;
3321             } else {
3322                 opreg = rm;
3323             }
3324 
3325             switch(b) {
3326             default:
3327             case 0x80:
3328             case 0x81:
3329             case 0x82:
3330                 val = insn_get(env, s, ot);
3331                 break;
3332             case 0x83:
3333                 val = (int8_t)insn_get(env, s, MO_8);
3334                 break;
3335             }
3336             tcg_gen_movi_tl(s->T1, val);
3337             gen_op(s, op, ot, opreg);
3338         }
3339         break;
3340 
3341         /**************************/
3342         /* inc, dec, and other misc arith */
3343     case 0x40 ... 0x47: /* inc Gv */
3344         ot = dflag;
3345         gen_inc(s, ot, OR_EAX + (b & 7), 1);
3346         break;
3347     case 0x48 ... 0x4f: /* dec Gv */
3348         ot = dflag;
3349         gen_inc(s, ot, OR_EAX + (b & 7), -1);
3350         break;
3351     case 0xf6: /* GRP3 */
3352     case 0xf7:
3353         ot = mo_b_d(b, dflag);
3354 
3355         modrm = x86_ldub_code(env, s);
3356         mod = (modrm >> 6) & 3;
3357         rm = (modrm & 7) | REX_B(s);
3358         op = (modrm >> 3) & 7;
3359         if (mod != 3) {
3360             if (op == 0) {
3361                 s->rip_offset = insn_const_size(ot);
3362             }
3363             gen_lea_modrm(env, s, modrm);
3364             /* For those below that handle locked memory, don't load here.  */
3365             if (!(s->prefix & PREFIX_LOCK)
3366                 || op != 2) {
3367                 gen_op_ld_v(s, ot, s->T0, s->A0);
3368             }
3369         } else {
3370             gen_op_mov_v_reg(s, ot, s->T0, rm);
3371         }
3372 
3373         switch(op) {
3374         case 0: /* test */
3375             val = insn_get(env, s, ot);
3376             tcg_gen_movi_tl(s->T1, val);
3377             gen_op_testl_T0_T1_cc(s);
3378             set_cc_op(s, CC_OP_LOGICB + ot);
3379             break;
3380         case 2: /* not */
3381             if (s->prefix & PREFIX_LOCK) {
3382                 if (mod == 3) {
3383                     goto illegal_op;
3384                 }
3385                 tcg_gen_movi_tl(s->T0, ~0);
3386                 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3387                                             s->mem_index, ot | MO_LE);
3388             } else {
3389                 tcg_gen_not_tl(s->T0, s->T0);
3390                 if (mod != 3) {
3391                     gen_op_st_v(s, ot, s->T0, s->A0);
3392                 } else {
3393                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3394                 }
3395             }
3396             break;
3397         case 3: /* neg */
3398             if (s->prefix & PREFIX_LOCK) {
3399                 TCGLabel *label1;
3400                 TCGv a0, t0, t1, t2;
3401 
3402                 if (mod == 3) {
3403                     goto illegal_op;
3404                 }
3405                 a0 = s->A0;
3406                 t0 = s->T0;
3407                 label1 = gen_new_label();
3408 
3409                 gen_set_label(label1);
3410                 t1 = tcg_temp_new();
3411                 t2 = tcg_temp_new();
3412                 tcg_gen_mov_tl(t2, t0);
3413                 tcg_gen_neg_tl(t1, t0);
3414                 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3415                                           s->mem_index, ot | MO_LE);
3416                 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3417 
3418                 tcg_gen_neg_tl(s->T0, t0);
3419             } else {
3420                 tcg_gen_neg_tl(s->T0, s->T0);
3421                 if (mod != 3) {
3422                     gen_op_st_v(s, ot, s->T0, s->A0);
3423                 } else {
3424                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3425                 }
3426             }
3427             gen_op_update_neg_cc(s);
3428             set_cc_op(s, CC_OP_SUBB + ot);
3429             break;
3430         case 4: /* mul */
3431             switch(ot) {
3432             case MO_8:
3433                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3434                 tcg_gen_ext8u_tl(s->T0, s->T0);
3435                 tcg_gen_ext8u_tl(s->T1, s->T1);
3436                 /* XXX: use 32 bit mul which could be faster */
3437                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3438                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3439                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3440                 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3441                 set_cc_op(s, CC_OP_MULB);
3442                 break;
3443             case MO_16:
3444                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3445                 tcg_gen_ext16u_tl(s->T0, s->T0);
3446                 tcg_gen_ext16u_tl(s->T1, s->T1);
3447                 /* XXX: use 32 bit mul which could be faster */
3448                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3449                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3450                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3451                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3452                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3453                 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3454                 set_cc_op(s, CC_OP_MULW);
3455                 break;
3456             default:
3457             case MO_32:
3458                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3459                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3460                 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3461                                   s->tmp2_i32, s->tmp3_i32);
3462                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3463                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3464                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3465                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3466                 set_cc_op(s, CC_OP_MULL);
3467                 break;
3468 #ifdef TARGET_X86_64
3469             case MO_64:
3470                 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3471                                   s->T0, cpu_regs[R_EAX]);
3472                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3473                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3474                 set_cc_op(s, CC_OP_MULQ);
3475                 break;
3476 #endif
3477             }
3478             break;
3479         case 5: /* imul */
3480             switch(ot) {
3481             case MO_8:
3482                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3483                 tcg_gen_ext8s_tl(s->T0, s->T0);
3484                 tcg_gen_ext8s_tl(s->T1, s->T1);
3485                 /* XXX: use 32 bit mul which could be faster */
3486                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3487                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3488                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3489                 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3490                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3491                 set_cc_op(s, CC_OP_MULB);
3492                 break;
3493             case MO_16:
3494                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3495                 tcg_gen_ext16s_tl(s->T0, s->T0);
3496                 tcg_gen_ext16s_tl(s->T1, s->T1);
3497                 /* XXX: use 32 bit mul which could be faster */
3498                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3499                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3500                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3501                 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3502                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3503                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3504                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3505                 set_cc_op(s, CC_OP_MULW);
3506                 break;
3507             default:
3508             case MO_32:
3509                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3510                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3511                 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3512                                   s->tmp2_i32, s->tmp3_i32);
3513                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3514                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3515                 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3516                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3517                 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3518                 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3519                 set_cc_op(s, CC_OP_MULL);
3520                 break;
3521 #ifdef TARGET_X86_64
3522             case MO_64:
3523                 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3524                                   s->T0, cpu_regs[R_EAX]);
3525                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3526                 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3527                 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3528                 set_cc_op(s, CC_OP_MULQ);
3529                 break;
3530 #endif
3531             }
3532             break;
3533         case 6: /* div */
3534             switch(ot) {
3535             case MO_8:
3536                 gen_helper_divb_AL(cpu_env, s->T0);
3537                 break;
3538             case MO_16:
3539                 gen_helper_divw_AX(cpu_env, s->T0);
3540                 break;
3541             default:
3542             case MO_32:
3543                 gen_helper_divl_EAX(cpu_env, s->T0);
3544                 break;
3545 #ifdef TARGET_X86_64
3546             case MO_64:
3547                 gen_helper_divq_EAX(cpu_env, s->T0);
3548                 break;
3549 #endif
3550             }
3551             break;
3552         case 7: /* idiv */
3553             switch(ot) {
3554             case MO_8:
3555                 gen_helper_idivb_AL(cpu_env, s->T0);
3556                 break;
3557             case MO_16:
3558                 gen_helper_idivw_AX(cpu_env, s->T0);
3559                 break;
3560             default:
3561             case MO_32:
3562                 gen_helper_idivl_EAX(cpu_env, s->T0);
3563                 break;
3564 #ifdef TARGET_X86_64
3565             case MO_64:
3566                 gen_helper_idivq_EAX(cpu_env, s->T0);
3567                 break;
3568 #endif
3569             }
3570             break;
3571         default:
3572             goto unknown_op;
3573         }
3574         break;
3575 
3576     case 0xfe: /* GRP4 */
3577     case 0xff: /* GRP5 */
3578         ot = mo_b_d(b, dflag);
3579 
3580         modrm = x86_ldub_code(env, s);
3581         mod = (modrm >> 6) & 3;
3582         rm = (modrm & 7) | REX_B(s);
3583         op = (modrm >> 3) & 7;
3584         if (op >= 2 && b == 0xfe) {
3585             goto unknown_op;
3586         }
3587         if (CODE64(s)) {
3588             if (op == 2 || op == 4) {
3589                 /* operand size for jumps is 64 bit */
3590                 ot = MO_64;
3591             } else if (op == 3 || op == 5) {
3592                 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3593             } else if (op == 6) {
3594                 /* default push size is 64 bit */
3595                 ot = mo_pushpop(s, dflag);
3596             }
3597         }
3598         if (mod != 3) {
3599             gen_lea_modrm(env, s, modrm);
3600             if (op >= 2 && op != 3 && op != 5)
3601                 gen_op_ld_v(s, ot, s->T0, s->A0);
3602         } else {
3603             gen_op_mov_v_reg(s, ot, s->T0, rm);
3604         }
3605 
3606         switch(op) {
3607         case 0: /* inc Ev */
3608             if (mod != 3)
3609                 opreg = OR_TMP0;
3610             else
3611                 opreg = rm;
3612             gen_inc(s, ot, opreg, 1);
3613             break;
3614         case 1: /* dec Ev */
3615             if (mod != 3)
3616                 opreg = OR_TMP0;
3617             else
3618                 opreg = rm;
3619             gen_inc(s, ot, opreg, -1);
3620             break;
3621         case 2: /* call Ev */
3622             /* XXX: optimize if memory (no 'and' is necessary) */
3623             if (dflag == MO_16) {
3624                 tcg_gen_ext16u_tl(s->T0, s->T0);
3625             }
3626             gen_push_v(s, eip_next_tl(s));
3627             gen_op_jmp_v(s, s->T0);
3628             gen_bnd_jmp(s);
3629             s->base.is_jmp = DISAS_JUMP;
3630             break;
3631         case 3: /* lcall Ev */
3632             if (mod == 3) {
3633                 goto illegal_op;
3634             }
3635             gen_op_ld_v(s, ot, s->T1, s->A0);
3636             gen_add_A0_im(s, 1 << ot);
3637             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3638         do_lcall:
3639             if (PE(s) && !VM86(s)) {
3640                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3641                 gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
3642                                            tcg_constant_i32(dflag - 1),
3643                                            eip_next_tl(s));
3644             } else {
3645                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3646                 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3647                 gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32,
3648                                       tcg_constant_i32(dflag - 1),
3649                                       eip_next_i32(s));
3650             }
3651             s->base.is_jmp = DISAS_JUMP;
3652             break;
3653         case 4: /* jmp Ev */
3654             if (dflag == MO_16) {
3655                 tcg_gen_ext16u_tl(s->T0, s->T0);
3656             }
3657             gen_op_jmp_v(s, s->T0);
3658             gen_bnd_jmp(s);
3659             s->base.is_jmp = DISAS_JUMP;
3660             break;
3661         case 5: /* ljmp Ev */
3662             if (mod == 3) {
3663                 goto illegal_op;
3664             }
3665             gen_op_ld_v(s, ot, s->T1, s->A0);
3666             gen_add_A0_im(s, 1 << ot);
3667             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3668         do_ljmp:
3669             if (PE(s) && !VM86(s)) {
3670                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3671                 gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
3672                                           eip_next_tl(s));
3673             } else {
3674                 gen_op_movl_seg_T0_vm(s, R_CS);
3675                 gen_op_jmp_v(s, s->T1);
3676             }
3677             s->base.is_jmp = DISAS_JUMP;
3678             break;
3679         case 6: /* push Ev */
3680             gen_push_v(s, s->T0);
3681             break;
3682         default:
3683             goto unknown_op;
3684         }
3685         break;
3686 
3687     case 0x84: /* test Ev, Gv */
3688     case 0x85:
3689         ot = mo_b_d(b, dflag);
3690 
3691         modrm = x86_ldub_code(env, s);
3692         reg = ((modrm >> 3) & 7) | REX_R(s);
3693 
3694         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3695         gen_op_mov_v_reg(s, ot, s->T1, reg);
3696         gen_op_testl_T0_T1_cc(s);
3697         set_cc_op(s, CC_OP_LOGICB + ot);
3698         break;
3699 
3700     case 0xa8: /* test eAX, Iv */
3701     case 0xa9:
3702         ot = mo_b_d(b, dflag);
3703         val = insn_get(env, s, ot);
3704 
3705         gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3706         tcg_gen_movi_tl(s->T1, val);
3707         gen_op_testl_T0_T1_cc(s);
3708         set_cc_op(s, CC_OP_LOGICB + ot);
3709         break;
3710 
3711     case 0x98: /* CWDE/CBW */
         /* Sign-extend the lower half of rAX in place (CBW/CWDE/CDQE per dflag). */
3712         switch (dflag) {
3713 #ifdef TARGET_X86_64
3714         case MO_64:
3715             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3716             tcg_gen_ext32s_tl(s->T0, s->T0);
3717             gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3718             break;
3719 #endif
3720         case MO_32:
3721             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3722             tcg_gen_ext16s_tl(s->T0, s->T0);
3723             gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3724             break;
3725         case MO_16:
3726             gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3727             tcg_gen_ext8s_tl(s->T0, s->T0);
3728             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3729             break;
3730         default:
3731             g_assert_not_reached();
3732         }
3733         break;
3734     case 0x99: /* CDQ/CWD */
         /* Replicate the sign bit of rAX into rDX (CWD/CDQ/CQO per dflag). */
3735         switch (dflag) {
3736 #ifdef TARGET_X86_64
3737         case MO_64:
3738             gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3739             tcg_gen_sari_tl(s->T0, s->T0, 63);
3740             gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3741             break;
3742 #endif
3743         case MO_32:
3744             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3745             tcg_gen_ext32s_tl(s->T0, s->T0);
3746             tcg_gen_sari_tl(s->T0, s->T0, 31);
3747             gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3748             break;
3749         case MO_16:
3750             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3751             tcg_gen_ext16s_tl(s->T0, s->T0);
3752             tcg_gen_sari_tl(s->T0, s->T0, 15);
3753             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3754             break;
3755         default:
3756             g_assert_not_reached();
3757         }
3758         break;
3759     case 0x1af: /* imul Gv, Ev */
3760     case 0x69: /* imul Gv, Ev, I */
3761     case 0x6b:
3762         ot = dflag;
3763         modrm = x86_ldub_code(env, s);
3764         reg = ((modrm >> 3) & 7) | REX_R(s);
         /* rip_offset: an immediate still follows the modrm/displacement bytes. */
3765         if (b == 0x69)
3766             s->rip_offset = insn_const_size(ot);
3767         else if (b == 0x6b)
3768             s->rip_offset = 1;
3769         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3770         if (b == 0x69) {
3771             val = insn_get(env, s, ot);
3772             tcg_gen_movi_tl(s->T1, val);
3773         } else if (b == 0x6b) {
3774             val = (int8_t)insn_get(env, s, MO_8);
3775             tcg_gen_movi_tl(s->T1, val);
3776         } else {
3777             gen_op_mov_v_reg(s, ot, s->T1, reg);
3778         }
         /*
          * For each width: cc_dst = low half of the signed product; cc_src is
          * zero iff the high half equals the sign extension of the low half
          * (i.e. no signed overflow), which CC_OP_MULB+ot uses for CF/OF.
          */
3779         switch (ot) {
3780 #ifdef TARGET_X86_64
3781         case MO_64:
3782             tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3783             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3784             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3785             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3786             break;
3787 #endif
3788         case MO_32:
3789             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3790             tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3791             tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3792                               s->tmp2_i32, s->tmp3_i32);
3793             tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3794             tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3795             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3796             tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3797             tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3798             break;
3799         default:
3800             tcg_gen_ext16s_tl(s->T0, s->T0);
3801             tcg_gen_ext16s_tl(s->T1, s->T1);
3802             /* XXX: use 32 bit mul which could be faster */
3803             tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3804             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3805             tcg_gen_ext16s_tl(s->tmp0, s->T0);
3806             tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3807             gen_op_mov_reg_v(s, ot, reg, s->T0);
3808             break;
3809         }
3810         set_cc_op(s, CC_OP_MULB + ot);
3811         break;
3812     case 0x1c0:
3813     case 0x1c1: /* xadd Ev, Gv */
3814         ot = mo_b_d(b, dflag);
3815         modrm = x86_ldub_code(env, s);
3816         reg = ((modrm >> 3) & 7) | REX_R(s);
3817         mod = (modrm >> 6) & 3;
3818         gen_op_mov_v_reg(s, ot, s->T0, reg);
3819         if (mod == 3) {
3820             rm = (modrm & 7) | REX_B(s);
3821             gen_op_mov_v_reg(s, ot, s->T1, rm);
3822             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3823             gen_op_mov_reg_v(s, ot, reg, s->T1);
3824             gen_op_mov_reg_v(s, ot, rm, s->T0);
3825         } else {
3826             gen_lea_modrm(env, s, modrm);
3827             if (s->prefix & PREFIX_LOCK) {
                 /* Atomic path: T1 receives the old memory value,
                    T0 becomes the sum, used below for the flags. */
3828                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3829                                             s->mem_index, ot | MO_LE);
3830                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3831             } else {
3832                 gen_op_ld_v(s, ot, s->T1, s->A0);
3833                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3834                 gen_op_st_v(s, ot, s->T0, s->A0);
3835             }
3836             gen_op_mov_reg_v(s, ot, reg, s->T1);
3837         }
         /* Flags as for ADD: cc_src = old value (T1), cc_dst = sum (T0). */
3838         gen_op_update2_cc(s);
3839         set_cc_op(s, CC_OP_ADDB + ot);
3840         break;
3841     case 0x1b0:
3842     case 0x1b1: /* cmpxchg Ev, Gv */
3843         {
3844             TCGv oldv, newv, cmpv, dest;
3845 
3846             ot = mo_b_d(b, dflag);
3847             modrm = x86_ldub_code(env, s);
3848             reg = ((modrm >> 3) & 7) | REX_R(s);
3849             mod = (modrm >> 6) & 3;
3850             oldv = tcg_temp_new();
3851             newv = tcg_temp_new();
3852             cmpv = tcg_temp_new();
             /* newv = replacement (Gv); cmpv = zero-extended accumulator. */
3853             gen_op_mov_v_reg(s, ot, newv, reg);
3854             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3855             gen_extu(ot, cmpv);
3856             if (s->prefix & PREFIX_LOCK) {
3857                 if (mod == 3) {
3858                     goto illegal_op;
3859                 }
3860                 gen_lea_modrm(env, s, modrm);
3861                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3862                                           s->mem_index, ot | MO_LE);
3863             } else {
3864                 if (mod == 3) {
3865                     rm = (modrm & 7) | REX_B(s);
3866                     gen_op_mov_v_reg(s, ot, oldv, rm);
3867                     gen_extu(ot, oldv);
3868 
3869                     /*
3870                      * Unlike the memory case, where "the destination operand receives
3871                      * a write cycle without regard to the result of the comparison",
3872                      * rm must not be touched altogether if the write fails, including
3873                      * not zero-extending it on 64-bit processors.  So, precompute
3874                      * the result of a successful writeback and perform the movcond
3875                      * directly on cpu_regs.  Also need to write accumulator first, in
3876                      * case rm is part of RAX too.
3877                      */
3878                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3879                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3880                 } else {
3881                     gen_lea_modrm(env, s, modrm);
3882                     gen_op_ld_v(s, ot, oldv, s->A0);
3883 
3884                     /*
3885                      * Perform an unconditional store cycle like physical cpu;
3886                      * must be before changing accumulator to ensure
3887                      * idempotency if the store faults and the instruction
3888                      * is restarted
3889                      */
3890                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3891                     gen_op_st_v(s, ot, newv, s->A0);
3892                 }
3893             }
3894             /*
3895              * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3896              * since it's dead here.
3897              */
3898             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3899             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
             /* Flags are those of CMP rAX, dest: srcT = accumulator,
                src = old destination value, dst = their difference. */
3900             tcg_gen_mov_tl(cpu_cc_src, oldv);
3901             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3902             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3903             set_cc_op(s, CC_OP_SUBB + ot);
3904         }
3905         break;
3906     case 0x1c7: /* cmpxchg8b */
         /* 0F C7 group: the modrm reg field selects the sub-operation. */
3907         modrm = x86_ldub_code(env, s);
3908         mod = (modrm >> 6) & 3;
3909         switch ((modrm >> 3) & 7) {
3910         case 1: /* CMPXCHG8, CMPXCHG16 */
3911             if (mod == 3) {
3912                 goto illegal_op;
3913             }
3914 #ifdef TARGET_X86_64
             /* With REX.W this is CMPXCHG16B, gated on CPUID CX16. */
3915             if (dflag == MO_64) {
3916                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3917                     goto illegal_op;
3918                 }
3919                 gen_cmpxchg16b(s, env, modrm);
3920                 break;
3921             }
3922 #endif
3923             if (!(s->cpuid_features & CPUID_CX8)) {
3924                 goto illegal_op;
3925             }
3926             gen_cmpxchg8b(s, env, modrm);
3927             break;
3928 
3929         case 7: /* RDSEED, RDPID with f3 prefix */
3930             if (mod != 3 ||
3931                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3932                 goto illegal_op;
3933             }
3934             if (s->prefix & PREFIX_REPZ) {
3935                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3936                     goto illegal_op;
3937                 }
3938                 gen_helper_rdpid(s->T0, cpu_env);
3939                 rm = (modrm & 7) | REX_B(s);
3940                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3941                 break;
3942             } else {
3943                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3944                     goto illegal_op;
3945                 }
                 /* RDSEED shares the RDRAND helper path below. */
3946                 goto do_rdrand;
3947             }
3948 
3949         case 6: /* RDRAND */
3950             if (mod != 3 ||
3951                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3952                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3953                 goto illegal_op;
3954             }
3955         do_rdrand:
3956             translator_io_start(&s->base);
3957             gen_helper_rdrand(s->T0, cpu_env);
3958             rm = (modrm & 7) | REX_B(s);
3959             gen_op_mov_reg_v(s, dflag, rm, s->T0);
             /* The helper computes EFLAGS (CF = success) itself. */
3960             set_cc_op(s, CC_OP_EFLAGS);
3961             break;
3962 
3963         default:
3964             goto illegal_op;
3965         }
3966         break;
3967 
3968         /**************************/
3969         /* push/pop */
3970     case 0x50 ... 0x57: /* push */
3971         gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3972         gen_push_v(s, s->T0);
3973         break;
3974     case 0x58 ... 0x5f: /* pop */
3975         ot = gen_pop_T0(s);
3976         /* NOTE: order is important for pop %sp */
3977         gen_pop_update(s, ot);
3978         gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3979         break;
3980     case 0x60: /* pusha */
3981         if (CODE64(s))
3982             goto illegal_op;
3983         gen_pusha(s);
3984         break;
3985     case 0x61: /* popa */
3986         if (CODE64(s))
3987             goto illegal_op;
3988         gen_popa(s);
3989         break;
3990     case 0x68: /* push Iv */
3991     case 0x6a:
3992         ot = mo_pushpop(s, dflag);
         /* 0x6a pushes a sign-extended imm8, 0x68 a full-size immediate. */
3993         if (b == 0x68)
3994             val = insn_get(env, s, ot);
3995         else
3996             val = (int8_t)insn_get(env, s, MO_8);
3997         tcg_gen_movi_tl(s->T0, val);
3998         gen_push_v(s, s->T0);
3999         break;
4000     case 0x8f: /* pop Ev */
4001         modrm = x86_ldub_code(env, s);
4002         mod = (modrm >> 6) & 3;
4003         ot = gen_pop_T0(s);
4004         if (mod == 3) {
4005             /* NOTE: order is important for pop %sp */
4006             gen_pop_update(s, ot);
4007             rm = (modrm & 7) | REX_B(s);
4008             gen_op_mov_reg_v(s, ot, rm, s->T0);
4009         } else {
4010             /* NOTE: order is important too for MMU exceptions */
             /*
              * NOTE(review): popl_esp_hack presumably compensates for the
              * already-popped stack pointer when ESP is used in the
              * destination's effective address — confirm in gen_lea_modrm.
              */
4011             s->popl_esp_hack = 1 << ot;
4012             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4013             s->popl_esp_hack = 0;
4014             gen_pop_update(s, ot);
4015         }
4016         break;
4017     case 0xc8: /* enter */
4018         {
4019             int level;
4020             val = x86_lduw_code(env, s);
4021             level = x86_ldub_code(env, s);
4022             gen_enter(s, val, level);
4023         }
4024         break;
4025     case 0xc9: /* leave */
4026         gen_leave(s);
4027         break;
4028     case 0x06: /* push es */
4029     case 0x0e: /* push cs */
4030     case 0x16: /* push ss */
4031     case 0x1e: /* push ds */
4032         if (CODE64(s))
4033             goto illegal_op;
4034         gen_op_movl_T0_seg(s, b >> 3);
4035         gen_push_v(s, s->T0);
4036         break;
4037     case 0x1a0: /* push fs */
4038     case 0x1a8: /* push gs */
4039         gen_op_movl_T0_seg(s, (b >> 3) & 7);
4040         gen_push_v(s, s->T0);
4041         break;
4042     case 0x07: /* pop es */
4043     case 0x17: /* pop ss */
4044     case 0x1f: /* pop ds */
4045         if (CODE64(s))
4046             goto illegal_op;
4047         reg = b >> 3;
4048         ot = gen_pop_T0(s);
4049         gen_movl_seg_T0(s, reg);
4050         gen_pop_update(s, ot);
4051         break;
4052     case 0x1a1: /* pop fs */
4053     case 0x1a9: /* pop gs */
4054         ot = gen_pop_T0(s);
4055         gen_movl_seg_T0(s, (b >> 3) & 7);
4056         gen_pop_update(s, ot);
4057         break;
4058 
4059         /**************************/
4060         /* mov */
4061     case 0x88:
4062     case 0x89: /* mov Gv, Ev */
4063         ot = mo_b_d(b, dflag);
4064         modrm = x86_ldub_code(env, s);
4065         reg = ((modrm >> 3) & 7) | REX_R(s);
4066 
4067         /* generate a generic store */
4068         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4069         break;
4070     case 0xc6:
4071     case 0xc7: /* mov Ev, Iv */
4072         ot = mo_b_d(b, dflag);
4073         modrm = x86_ldub_code(env, s);
4074         mod = (modrm >> 6) & 3;
4075         if (mod != 3) {
             /* The immediate follows the addressing bytes. */
4076             s->rip_offset = insn_const_size(ot);
4077             gen_lea_modrm(env, s, modrm);
4078         }
4079         val = insn_get(env, s, ot);
4080         tcg_gen_movi_tl(s->T0, val);
4081         if (mod != 3) {
4082             gen_op_st_v(s, ot, s->T0, s->A0);
4083         } else {
4084             gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4085         }
4086         break;
4087     case 0x8a:
4088     case 0x8b: /* mov Ev, Gv */
4089         ot = mo_b_d(b, dflag);
4090         modrm = x86_ldub_code(env, s);
4091         reg = ((modrm >> 3) & 7) | REX_R(s);
4092 
4093         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4094         gen_op_mov_reg_v(s, ot, reg, s->T0);
4095         break;
4096     case 0x8e: /* mov seg, Gv */
4097         modrm = x86_ldub_code(env, s);
4098         reg = (modrm >> 3) & 7;
         /* CS cannot be loaded with MOV; only 6 segment registers exist. */
4099         if (reg >= 6 || reg == R_CS)
4100             goto illegal_op;
4101         gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4102         gen_movl_seg_T0(s, reg);
4103         break;
4104     case 0x8c: /* mov Gv, seg */
4105         modrm = x86_ldub_code(env, s);
4106         reg = (modrm >> 3) & 7;
4107         mod = (modrm >> 6) & 3;
4108         if (reg >= 6)
4109             goto illegal_op;
4110         gen_op_movl_T0_seg(s, reg);
         /* Memory destination is always a 16-bit store; register gets dflag. */
4111         ot = mod == 3 ? dflag : MO_16;
4112         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4113         break;
4114 
4115     case 0x1b6: /* movzbS Gv, Eb */
4116     case 0x1b7: /* movzwS Gv, Eb */
4117     case 0x1be: /* movsbS Gv, Eb */
4118     case 0x1bf: /* movswS Gv, Eb */
4119         {
4120             MemOp d_ot;
4121             MemOp s_ot;
4122 
4123             /* d_ot is the size of destination */
4124             d_ot = dflag;
4125             /* ot is the size of source */
4126             ot = (b & 1) + MO_8;
4127             /* s_ot is the sign+size of source */
4128             s_ot = b & 8 ? MO_SIGN | ot : ot;
4129 
4130             modrm = x86_ldub_code(env, s);
4131             reg = ((modrm >> 3) & 7) | REX_R(s);
4132             mod = (modrm >> 6) & 3;
4133             rm = (modrm & 7) | REX_B(s);
4134 
4135             if (mod == 3) {
                 /* %ah/%ch/%dh/%bh: sign-extract bits 8..15 of the base reg. */
4136                 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4137                     tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4138                 } else {
4139                     gen_op_mov_v_reg(s, ot, s->T0, rm);
4140                     switch (s_ot) {
4141                     case MO_UB:
4142                         tcg_gen_ext8u_tl(s->T0, s->T0);
4143                         break;
4144                     case MO_SB:
4145                         tcg_gen_ext8s_tl(s->T0, s->T0);
4146                         break;
4147                     case MO_UW:
4148                         tcg_gen_ext16u_tl(s->T0, s->T0);
4149                         break;
4150                     default:
4151                     case MO_SW:
4152                         tcg_gen_ext16s_tl(s->T0, s->T0);
4153                         break;
4154                     }
4155                 }
4156                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4157             } else {
                 /* Memory source: the load itself applies the extension. */
4158                 gen_lea_modrm(env, s, modrm);
4159                 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4160                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4161             }
4162         }
4163         break;
4164 
4165     case 0x8d: /* lea */
4166         modrm = x86_ldub_code(env, s);
4167         mod = (modrm >> 6) & 3;
4168         if (mod == 3)
4169             goto illegal_op;
4170         reg = ((modrm >> 3) & 7) | REX_R(s);
4171         {
             /* Compute the effective address only; no memory access and
                no segment applied (-1, -1: neither default nor override). */
4172             AddressParts a = gen_lea_modrm_0(env, s, modrm);
4173             TCGv ea = gen_lea_modrm_1(s, a, false);
4174             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4175             gen_op_mov_reg_v(s, dflag, reg, s->A0);
4176         }
4177         break;
4178 
4179     case 0xa0: /* mov EAX, Ov */
4180     case 0xa1:
4181     case 0xa2: /* mov Ov, EAX */
4182     case 0xa3:
4183         {
4184             target_ulong offset_addr;
4185 
             /* moffs forms: absolute offset encoded directly, DS-relative.
                Bit 1 of the opcode selects load vs store direction. */
4186             ot = mo_b_d(b, dflag);
4187             offset_addr = insn_get_addr(env, s, s->aflag);
4188             tcg_gen_movi_tl(s->A0, offset_addr);
4189             gen_add_A0_ds_seg(s);
4190             if ((b & 2) == 0) {
4191                 gen_op_ld_v(s, ot, s->T0, s->A0);
4192                 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4193             } else {
4194                 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4195                 gen_op_st_v(s, ot, s->T0, s->A0);
4196             }
4197         }
4198         break;
4199     case 0xd7: /* xlat */
         /* AL = [DS:rBX + zero-extended AL], truncated to the address size. */
4200         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4201         tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4202         tcg_gen_add_tl(s->A0, s->A0, s->T0);
4203         gen_extu(s->aflag, s->A0);
4204         gen_add_A0_ds_seg(s);
4205         gen_op_ld_v(s, MO_8, s->T0, s->A0);
4206         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4207         break;
4208     case 0xb0 ... 0xb7: /* mov R, Ib */
4209         val = insn_get(env, s, MO_8);
4210         tcg_gen_movi_tl(s->T0, val);
4211         gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4212         break;
4213     case 0xb8 ... 0xbf: /* mov R, Iv */
4214 #ifdef TARGET_X86_64
4215         if (dflag == MO_64) {
4216             uint64_t tmp;
4217             /* 64 bit case: full 8-byte immediate follows the opcode */
4218             tmp = x86_ldq_code(env, s);
4219             reg = (b & 7) | REX_B(s);
4220             tcg_gen_movi_tl(s->T0, tmp);
4221             gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4222         } else
4223 #endif
4224         {
4225             ot = dflag;
4226             val = insn_get(env, s, ot);
4227             reg = (b & 7) | REX_B(s);
4228             tcg_gen_movi_tl(s->T0, val);
4229             gen_op_mov_reg_v(s, ot, reg, s->T0);
4230         }
4231         break;
4232 
4233     case 0x91 ... 0x97: /* xchg R, EAX */
4234     do_xchg_reg_eax:
4235         ot = dflag;
4236         reg = (b & 7) | REX_B(s);
4237         rm = R_EAX;
4238         goto do_xchg_reg;
4239     case 0x86:
4240     case 0x87: /* xchg Ev, Gv */
4241         ot = mo_b_d(b, dflag);
4242         modrm = x86_ldub_code(env, s);
4243         reg = ((modrm >> 3) & 7) | REX_R(s);
4244         mod = (modrm >> 6) & 3;
4245         if (mod == 3) {
4246             rm = (modrm & 7) | REX_B(s);
4247         do_xchg_reg:
4248             gen_op_mov_v_reg(s, ot, s->T0, reg);
4249             gen_op_mov_v_reg(s, ot, s->T1, rm);
4250             gen_op_mov_reg_v(s, ot, rm, s->T0);
4251             gen_op_mov_reg_v(s, ot, reg, s->T1);
4252         } else {
4253             gen_lea_modrm(env, s, modrm);
4254             gen_op_mov_v_reg(s, ot, s->T0, reg);
4255             /* for xchg, lock is implicit */
4256             tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4257                                    s->mem_index, ot | MO_LE);
4258             gen_op_mov_reg_v(s, ot, reg, s->T1);
4259         }
4260         break;
4261     case 0xc4: /* les Gv */
4262         /* In CODE64 this is VEX3; see above.  */
4263         op = R_ES;
4264         goto do_lxx;
4265     case 0xc5: /* lds Gv */
4266         /* In CODE64 this is VEX2; see above.  */
4267         op = R_DS;
4268         goto do_lxx;
4269     case 0x1b2: /* lss Gv */
4270         op = R_SS;
4271         goto do_lxx;
4272     case 0x1b4: /* lfs Gv */
4273         op = R_FS;
4274         goto do_lxx;
4275     case 0x1b5: /* lgs Gv */
4276         op = R_GS;
4277     do_lxx:
         /* Far pointer load: offset (T1) at [A0], selector (T0) just after. */
4278         ot = dflag != MO_16 ? MO_32 : MO_16;
4279         modrm = x86_ldub_code(env, s);
4280         reg = ((modrm >> 3) & 7) | REX_R(s);
4281         mod = (modrm >> 6) & 3;
4282         if (mod == 3)
4283             goto illegal_op;
4284         gen_lea_modrm(env, s, modrm);
4285         gen_op_ld_v(s, ot, s->T1, s->A0);
4286         gen_add_A0_im(s, 1 << ot);
4287         /* load the segment first to handle exceptions properly */
4288         gen_op_ld_v(s, MO_16, s->T0, s->A0)
4289         gen_movl_seg_T0(s, op);
4290         /* then put the data */
4291         gen_op_mov_reg_v(s, ot, reg, s->T1);
4292         break;
4293 
4294         /************************/
4295         /* shifts */
4296     case 0xc0:
4297     case 0xc1:
4298         /* shift Ev,Ib */
         /* Grp2 shifts share the grp2 label; `shift` encodes the count
            operand: 0 = %cl, 1 = constant 1, 2 = imm8 (read below). */
4299         shift = 2;
4300     grp2:
4301         {
4302             ot = mo_b_d(b, dflag);
4303             modrm = x86_ldub_code(env, s);
4304             mod = (modrm >> 6) & 3;
4305             op = (modrm >> 3) & 7;
4306 
4307             if (mod != 3) {
4308                 if (shift == 2) {
                     /* An imm8 count still follows the addressing bytes. */
4309                     s->rip_offset = 1;
4310                 }
4311                 gen_lea_modrm(env, s, modrm);
4312                 opreg = OR_TMP0;
4313             } else {
4314                 opreg = (modrm & 7) | REX_B(s);
4315             }
4316 
4317             /* simpler op */
4318             if (shift == 0) {
4319                 gen_shift(s, op, ot, opreg, OR_ECX);
4320             } else {
4321                 if (shift == 2) {
4322                     shift = x86_ldub_code(env, s);
4323                 }
4324                 gen_shifti(s, op, ot, opreg, shift);
4325             }
4326         }
4327         break;
4328     case 0xd0:
4329     case 0xd1:
4330         /* shift Ev,1 */
4331         shift = 1;
4332         goto grp2;
4333     case 0xd2:
4334     case 0xd3:
4335         /* shift Ev,cl */
4336         shift = 0;
4337         goto grp2;
4338 
4339     case 0x1a4: /* shld imm */
         /* Double-precision shifts: op selects direction (0 = shld, 1 = shrd),
            shift selects the count operand (1 = imm8, 0 = %cl). */
4340         op = 0;
4341         shift = 1;
4342         goto do_shiftd;
4343     case 0x1a5: /* shld cl */
4344         op = 0;
4345         shift = 0;
4346         goto do_shiftd;
4347     case 0x1ac: /* shrd imm */
4348         op = 1;
4349         shift = 1;
4350         goto do_shiftd;
4351     case 0x1ad: /* shrd cl */
4352         op = 1;
4353         shift = 0;
4354     do_shiftd:
4355         ot = dflag;
4356         modrm = x86_ldub_code(env, s);
4357         mod = (modrm >> 6) & 3;
4358         rm = (modrm & 7) | REX_B(s);
4359         reg = ((modrm >> 3) & 7) | REX_R(s);
4360         if (mod != 3) {
4361             gen_lea_modrm(env, s, modrm);
4362             opreg = OR_TMP0;
4363         } else {
4364             opreg = rm;
4365         }
4366         gen_op_mov_v_reg(s, ot, s->T1, reg);
4367 
4368         if (shift) {
4369             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4370             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4371         } else {
4372             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4373         }
4374         break;
4375 
4376         /************************/
4377         /* floats */
4378     case 0xd8 ... 0xdf:
4379         {
4380             bool update_fip = true;
4381 
4382             if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4383                 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
4384                 /* XXX: what to do if illegal op ? */
4385                 gen_exception(s, EXCP07_PREX);
4386                 break;
4387             }
4388             modrm = x86_ldub_code(env, s);
4389             mod = (modrm >> 6) & 3;
4390             rm = modrm & 7;
4391             op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4392             if (mod != 3) {
4393                 /* memory op */
4394                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4395                 TCGv ea = gen_lea_modrm_1(s, a, false);
4396                 TCGv last_addr = tcg_temp_new();
4397                 bool update_fdp = true;
4398 
4399                 tcg_gen_mov_tl(last_addr, ea);
4400                 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4401 
4402                 switch (op) {
4403                 case 0x00 ... 0x07: /* fxxxs */
4404                 case 0x10 ... 0x17: /* fixxxl */
4405                 case 0x20 ... 0x27: /* fxxxl */
4406                 case 0x30 ... 0x37: /* fixxx */
4407                     {
4408                         int op1;
4409                         op1 = op & 7;
4410 
4411                         switch (op >> 4) {
4412                         case 0:
4413                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4414                                                 s->mem_index, MO_LEUL);
4415                             gen_helper_flds_FT0(cpu_env, s->tmp2_i32);
4416                             break;
4417                         case 1:
4418                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4419                                                 s->mem_index, MO_LEUL);
4420                             gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
4421                             break;
4422                         case 2:
4423                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4424                                                 s->mem_index, MO_LEUQ);
4425                             gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
4426                             break;
4427                         case 3:
4428                         default:
4429                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4430                                                 s->mem_index, MO_LESW);
4431                             gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
4432                             break;
4433                         }
4434 
4435                         gen_helper_fp_arith_ST0_FT0(op1);
4436                         if (op1 == 3) {
4437                             /* fcomp needs pop */
4438                             gen_helper_fpop(cpu_env);
4439                         }
4440                     }
4441                     break;
4442                 case 0x08: /* flds */
4443                 case 0x0a: /* fsts */
4444                 case 0x0b: /* fstps */
4445                 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4446                 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4447                 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4448                     switch (op & 7) {
4449                     case 0:
4450                         switch (op >> 4) {
4451                         case 0:
4452                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4453                                                 s->mem_index, MO_LEUL);
4454                             gen_helper_flds_ST0(cpu_env, s->tmp2_i32);
4455                             break;
4456                         case 1:
4457                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4458                                                 s->mem_index, MO_LEUL);
4459                             gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
4460                             break;
4461                         case 2:
4462                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4463                                                 s->mem_index, MO_LEUQ);
4464                             gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
4465                             break;
4466                         case 3:
4467                         default:
4468                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4469                                                 s->mem_index, MO_LESW);
4470                             gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
4471                             break;
4472                         }
4473                         break;
4474                     case 1:
                        /* XXX: the corresponding CPUID bit must be tested ! */
                        /* fisttp (SSE3): store ST0 as a truncated integer,
                           then pop.  op >> 4 encodes the operand size.  */
                        switch (op >> 4) {
                        case 1:
                            /* fisttp m32int */
                            gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            /* fisttp m64int */
                            gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            /* fisttp m16int */
                            gen_helper_fistt_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        /* fst/fstp/fist/fistp: store ST0 to memory;
                           (op & 7) == 3 selects the popping variant.  */
                        switch (op >> 4) {
                        case 0:
                            /* fst(p) m32fp */
                            gen_helper_fsts_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 1:
                            /* fist(p) m32int */
                            gen_helper_fistl_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            /* fst(p) m64fp */
                            gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            /* fist(p) m16int */
                            gen_helper_fist_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        if ((op & 7) == 3) {
                            gen_helper_fpop(cpu_env);
                        }
                        break;
                    }
                    break;
                /* Environment/state and 64-bit/BCD memory forms.  Pure
                   control ops clear update_fip/update_fdp so the FPU
                   instruction/data pointers are left untouched.  */
                case 0x0c: /* fldenv mem */
                    gen_helper_fldenv(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0d: /* fldcw mem */
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    gen_helper_fldcw(cpu_env, s->tmp2_i32);
                    update_fip = update_fdp = false;
                    break;
                case 0x0e: /* fnstenv mem */
                    gen_helper_fstenv(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0f: /* fnstcw mem */
                    gen_helper_fnstcw(s->tmp2_i32, cpu_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x1d: /* fldt mem */
                    gen_helper_fldt_ST0(cpu_env, s->A0);
                    break;
                case 0x1f: /* fstpt mem */
                    gen_helper_fstt_ST0(cpu_env, s->A0);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x2c: /* frstor mem */
                    gen_helper_frstor(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2e: /* fnsave mem */
                    gen_helper_fsave(cpu_env, s->A0,
                                     tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2f: /* fnstsw mem */
                    gen_helper_fnstsw(s->tmp2_i32, cpu_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x3c: /* fbld */
                    gen_helper_fbld_ST0(cpu_env, s->A0);
                    break;
                case 0x3e: /* fbstp */
                    gen_helper_fbst_ST0(cpu_env, s->A0);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x3d: /* fildll */
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
                    break;
                case 0x3f: /* fistpll */
                    gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto unknown_op;
                }
4592 
                if (update_fdp) {
                    /* Record the FPU data pointer: selector of the segment
                       used by the last memory operand (explicit override if
                       present, else the decoded default) plus its offset.  */
                    int last_seg = s->override >= 0 ? s->override : a.def_seg;

                    tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,
                                            segs[last_seg].selector));
                    tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
                                     offsetof(CPUX86State, fpds));
                    tcg_gen_st_tl(last_addr, cpu_env,
                                  offsetof(CPUX86State, fpdp));
                }
            } else {
                /* register float ops: rm names the ST(i) operand */
                opreg = rm;

                switch (op) {
                case 0x08: /* fld sti */
                    gen_helper_fpush(cpu_env);
                    /* after fpush the old ST(i) has become ST(i+1) */
                    gen_helper_fmov_ST0_STN(cpu_env,
                                            tcg_constant_i32((opreg + 1) & 7));
                    break;
                case 0x09: /* fxchg sti */
                case 0x29: /* fxchg4 sti, undocumented op */
                case 0x39: /* fxchg7 sti, undocumented op */
                    gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x0a: /* grp d9/2 */
                    switch (rm) {
                    case 0: /* fnop */
                        /* check exceptions (FreeBSD FPU probe) */
                        gen_helper_fwait(cpu_env);
                        update_fip = false;
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0c: /* grp d9/4 */
                    switch (rm) {
                    case 0: /* fchs */
                        gen_helper_fchs_ST0(cpu_env);
                        break;
                    case 1: /* fabs */
                        gen_helper_fabs_ST0(cpu_env);
                        break;
                    case 4: /* ftst */
                        /* compare ST0 against +0.0 */
                        gen_helper_fldz_FT0(cpu_env);
                        gen_helper_fcom_ST0_FT0(cpu_env);
                        break;
                    case 5: /* fxam */
                        gen_helper_fxam_ST0(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0d: /* grp d9/5 */
                    {
                        /* constant loads: push, then load the constant
                           selected by rm into the new ST0 */
                        switch (rm) {
                        case 0:
                            gen_helper_fpush(cpu_env);
                            gen_helper_fld1_ST0(cpu_env);
                            break;
                        case 1:
                            gen_helper_fpush(cpu_env);
                            gen_helper_fldl2t_ST0(cpu_env);
                            break;
                        case 2:
                            gen_helper_fpush(cpu_env);
                            gen_helper_fldl2e_ST0(cpu_env);
                            break;
                        case 3:
                            gen_helper_fpush(cpu_env);
                            gen_helper_fldpi_ST0(cpu_env);
                            break;
                        case 4:
                            gen_helper_fpush(cpu_env);
                            gen_helper_fldlg2_ST0(cpu_env);
                            break;
                        case 5:
                            gen_helper_fpush(cpu_env);
                            gen_helper_fldln2_ST0(cpu_env);
                            break;
                        case 6:
                            gen_helper_fpush(cpu_env);
                            gen_helper_fldz_ST0(cpu_env);
                            break;
                        default:
                            goto unknown_op;
                        }
                    }
                    break;
                /* d9/6 and d9/7: one helper per rm encoding */
                case 0x0e: /* grp d9/6 */
                    switch (rm) {
                    case 0: /* f2xm1 */
                        gen_helper_f2xm1(cpu_env);
                        break;
                    case 1: /* fyl2x */
                        gen_helper_fyl2x(cpu_env);
                        break;
                    case 2: /* fptan */
                        gen_helper_fptan(cpu_env);
                        break;
                    case 3: /* fpatan */
                        gen_helper_fpatan(cpu_env);
                        break;
                    case 4: /* fxtract */
                        gen_helper_fxtract(cpu_env);
                        break;
                    case 5: /* fprem1 */
                        gen_helper_fprem1(cpu_env);
                        break;
                    case 6: /* fdecstp */
                        gen_helper_fdecstp(cpu_env);
                        break;
                    default:
                    case 7: /* fincstp */
                        gen_helper_fincstp(cpu_env);
                        break;
                    }
                    break;
                case 0x0f: /* grp d9/7 */
                    switch (rm) {
                    case 0: /* fprem */
                        gen_helper_fprem(cpu_env);
                        break;
                    case 1: /* fyl2xp1 */
                        gen_helper_fyl2xp1(cpu_env);
                        break;
                    case 2: /* fsqrt */
                        gen_helper_fsqrt(cpu_env);
                        break;
                    case 3: /* fsincos */
                        gen_helper_fsincos(cpu_env);
                        break;
                    case 5: /* fscale */
                        gen_helper_fscale(cpu_env);
                        break;
                    case 4: /* frndint */
                        gen_helper_frndint(cpu_env);
                        break;
                    case 6: /* fsin */
                        gen_helper_fsin(cpu_env);
                        break;
                    default:
                    case 7: /* fcos */
                        gen_helper_fcos(cpu_env);
                        break;
                    }
                    break;
                case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
                case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
                case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                    {
                        int op1;

                        /* op & 7 selects the arithmetic operation
                           (fadd/fmul/fsub/...) dispatched by the helper */
                        op1 = op & 7;
                        if (op >= 0x20) {
                            /* destination is ST(i); 0x30.. forms also pop */
                            gen_helper_fp_arith_STN_ST0(op1, opreg);
                            if (op >= 0x30) {
                                gen_helper_fpop(cpu_env);
                            }
                        } else {
                            /* destination is ST0 */
                            gen_helper_fmov_FT0_STN(cpu_env,
                                                    tcg_constant_i32(opreg));
                            gen_helper_fp_arith_ST0_FT0(op1);
                        }
                    }
                    break;
                case 0x02: /* fcom */
                case 0x22: /* fcom2, undocumented op */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    break;
                case 0x03: /* fcomp */
                case 0x23: /* fcomp3, undocumented op */
                case 0x32: /* fcomp5, undocumented op */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x15: /* da/5 */
                    switch (rm) {
                    case 1: /* fucompp */
                        /* compare ST0 with ST1, then pop both */
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
                        gen_helper_fucom_ST0_FT0(cpu_env);
                        gen_helper_fpop(cpu_env);
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1c:
                    switch (rm) {
                    case 0: /* feni (287 only, just do nop here) */
                        break;
                    case 1: /* fdisi (287 only, just do nop here) */
                        break;
                    case 2: /* fclex */
                        gen_helper_fclex(cpu_env);
                        update_fip = false;
                        break;
                    case 3: /* fninit */
                        gen_helper_fninit(cpu_env);
                        update_fip = false;
                        break;
                    case 4: /* fsetpm (287 only, just do nop here) */
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1d: /* fucomi */
                    /* EFLAGS-writing compare; gated on the CMOV feature bit */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x1e: /* fcomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x28: /* ffree sti */
                    gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x2a: /* fst sti */
                    gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x2b: /* fstp sti */
                case 0x0b: /* fstp1 sti, undocumented op */
                case 0x3a: /* fstp8 sti, undocumented op */
                case 0x3b: /* fstp9 sti, undocumented op */
                    gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x2c: /* fucom st(i) */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    break;
                case 0x2d: /* fucomp st(i) */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x33: /* de/3 */
                    switch (rm) {
                    case 1: /* fcompp */
                        /* compare ST0 with ST1, then pop both */
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
                        gen_helper_fcom_ST0_FT0(cpu_env);
                        gen_helper_fpop(cpu_env);
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x38: /* ffreep sti, undocumented op */
                    gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x3c: /* df/4 */
                    switch (rm) {
                    case 0:
                        /* fnstsw ax: status word into AX */
                        gen_helper_fnstsw(s->tmp2_i32, cpu_env);
                        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
                        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x3d: /* fucomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x3e: /* fcomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x10 ... 0x13: /* fcmovxx */
                case 0x18 ... 0x1b:
                    {
                        int op1;
                        TCGLabel *l1;
                        static const uint8_t fcmov_cc[8] = {
                            (JCC_B << 1),
                            (JCC_Z << 1),
                            (JCC_BE << 1),
                            (JCC_P << 1),
                        };

                        if (!(s->cpuid_features & CPUID_CMOV)) {
                            goto illegal_op;
                        }
                        /* Build the jcc condition that skips the move:
                           bit 3 of op distinguishes the negated (0x18..)
                           group, and the low bit of op1 inverts the test.  */
                        op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                        l1 = gen_new_label();
                        /* branch over the fmov when the fcmov condition
                           does not hold */
                        gen_jcc1_noeob(s, op1, l1);
                        gen_helper_fmov_ST0_STN(cpu_env,
                                                tcg_constant_i32(opreg));
                        gen_set_label(l1);
                    }
                    break;
                default:
                    goto unknown_op;
                }
            }

            if (update_fip) {
                /* Record the FPU instruction pointer: CS selector plus the
                   EIP of the current instruction.  */
                tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
                               offsetof(CPUX86State, segs[R_CS].selector));
                tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
                                 offsetof(CPUX86State, fpcs));
                tcg_gen_st_tl(eip_cur_tl(s),
                              cpu_env, offsetof(CPUX86State, fpip));
            }
        }
        break;
4931         /************************/
4932         /* string ops */
4933 
    case 0xa4: /* movsS */
    case 0xa5:
        /* mo_b_d: MO_8 for the even (byte) opcode, else dflag */
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot);
        } else {
            gen_movs(s, ot);
        }
        break;

    case 0xaa: /* stosS */
    case 0xab:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        ot = mo_b_d(b, dflag);
        /* third argument distinguishes repnz from repz */
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, 0);
        } else {
            gen_scas(s, ot);
        }
        break;

    case 0xa6: /* cmpsS */
    case 0xa7:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
    case 0x6c: /* insS */
    case 0x6d:
        ot = mo_b_d32(b, dflag);
        /* port number is DX, truncated to 16 bits */
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        /* gen_check_io handles IOPL/TSS permission and SVM intercepts */
        if (!gen_check_io(s, ot, s->tmp2_i32,
                          SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot);
        } else {
            gen_ins(s, ot);
        }
        break;
    case 0x6e: /* outsS */
    case 0x6f:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot);
        } else {
            gen_outs(s, ot);
        }
        break;
5016 
5017         /************************/
5018         /* port I/O */
5019 
    case 0xe4: /* in AL/eAX, imm8 */
    case 0xe5:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        /* re-check I/O breakpoints after the access */
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xe6: /* out imm8, AL/eAX */
    case 0xe7:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xec: /* in AL/eAX, DX */
    case 0xed:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xee: /* out DX, AL/eAX */
    case 0xef:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
5074 
5075         /************************/
5076         /* control */
    case 0xc2: /* ret im */
        val = x86_ldsw_code(env, s);
        ot = gen_pop_T0(s);
        /* pop return address, then release imm16 extra stack bytes */
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load.  */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xc3: /* ret */
        ot = gen_pop_T0(s);
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load.  */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xca: /* lret im */
        val = x86_ldsw_code(env, s);
    do_lret:
        if (PE(s) && !VM86(s)) {
            /* protected mode: privilege/segment checks in the helper */
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
                                      tcg_constant_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_v(s, s->T0);
            /* pop selector */
            gen_add_A0_im(s, 1 << dflag);
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            gen_op_movl_seg_T0_vm(s, R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (2 << dflag));
        }
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, SVM_EXIT_IRET);
        if (!PE(s) || VM86(s)) {
            /* real mode or vm86 mode */
            if (!check_vm86_iopl(s)) {
                break;
            }
            gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
        } else {
            gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
        }
        set_cc_op(s, CC_OP_EFLAGS);
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xe8: /* call im */
        {
            /* relative displacement, sign-extended per operand size */
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_push_v(s, eip_next_tl(s));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            /* far call with immediate ptr16:16/32 is invalid in 64-bit */
            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;
    case 0x180 ... 0x18f: /* jcc Jv */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;
5205 
    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = x86_ldub_code(env, s);
        /* materialize the condition (low nibble of b) into T0, then
           store it as a byte to the modrm-addressed destination */
        gen_setcc1(s, b, s->T0);
        gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        if (!(s->cpuid_features & CPUID_CMOV)) {
            goto illegal_op;
        }
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_cmovcc1(env, s, ot, b, modrm, reg);
        break;
5220 
5221         /************************/
5222         /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
        if (check_vm86_iopl(s)) {
            gen_update_cc_op(s);
            gen_helper_read_eflags(s->T0, cpu_env);
            gen_push_v(s, s->T0);
        }
        break;
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, SVM_EXIT_POPF);
        if (check_vm86_iopl(s)) {
            /* Writable-flag mask depends on privilege: IF/IOPL only at
               CPL 0, IF alone when CPL <= IOPL.  */
            int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;

            if (CPL(s) == 0) {
                mask |= IF_MASK | IOPL_MASK;
            } else if (CPL(s) <= IOPL(s)) {
                mask |= IF_MASK;
            }
            if (dflag == MO_16) {
                mask &= 0xffff;
            }

            ot = gen_pop_T0(s);
            gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask));
            gen_pop_update(s, ot);
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
        gen_compute_eflags(s);
        /* keep only OF from the current flags; S/Z/A/P/C come from AH */
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O)
        tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
        tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(s->tmp2_i32, 1);
        tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        /* df holds +1/-1, the string-op stride sign */
        tcg_gen_movi_i32(s->tmp2_i32, -1);
        tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
5290 
5291         /************************/
5292         /* bit operations */
5293     case 0x1ba: /* bt/bts/btr/btc Gv, im */
5294         ot = dflag;
5295         modrm = x86_ldub_code(env, s);
5296         op = (modrm >> 3) & 7;
5297         mod = (modrm >> 6) & 3;
5298         rm = (modrm & 7) | REX_B(s);
5299         if (mod != 3) {
5300             s->rip_offset = 1;
5301             gen_lea_modrm(env, s, modrm);
5302             if (!(s->prefix & PREFIX_LOCK)) {
5303                 gen_op_ld_v(s, ot, s->T0, s->A0);
5304             }
5305         } else {
5306             gen_op_mov_v_reg(s, ot, s->T0, rm);
5307         }
5308         /* load shift */
5309         val = x86_ldub_code(env, s);
5310         tcg_gen_movi_tl(s->T1, val);
5311         if (op < 4)
5312             goto unknown_op;
5313         op -= 4;
5314         goto bt_op;
5315     case 0x1a3: /* bt Gv, Ev */
5316         op = 0;
5317         goto do_btx;
5318     case 0x1ab: /* bts */
5319         op = 1;
5320         goto do_btx;
5321     case 0x1b3: /* btr */
5322         op = 2;
5323         goto do_btx;
5324     case 0x1bb: /* btc */
5325         op = 3;
5326     do_btx:
5327         ot = dflag;
5328         modrm = x86_ldub_code(env, s);
5329         reg = ((modrm >> 3) & 7) | REX_R(s);
5330         mod = (modrm >> 6) & 3;
5331         rm = (modrm & 7) | REX_B(s);
5332         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5333         if (mod != 3) {
5334             AddressParts a = gen_lea_modrm_0(env, s, modrm);
5335             /* specific case: we need to add a displacement */
5336             gen_exts(ot, s->T1);
5337             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5338             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5339             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5340             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5341             if (!(s->prefix & PREFIX_LOCK)) {
5342                 gen_op_ld_v(s, ot, s->T0, s->A0);
5343             }
5344         } else {
5345             gen_op_mov_v_reg(s, ot, s->T0, rm);
5346         }
5347     bt_op:
5348         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5349         tcg_gen_movi_tl(s->tmp0, 1);
5350         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5351         if (s->prefix & PREFIX_LOCK) {
5352             switch (op) {
5353             case 0: /* bt */
5354                 /* Needs no atomic ops; we surpressed the normal
5355                    memory load for LOCK above so do it now.  */
5356                 gen_op_ld_v(s, ot, s->T0, s->A0);
5357                 break;
5358             case 1: /* bts */
5359                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5360                                            s->mem_index, ot | MO_LE);
5361                 break;
5362             case 2: /* btr */
5363                 tcg_gen_not_tl(s->tmp0, s->tmp0);
5364                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5365                                             s->mem_index, ot | MO_LE);
5366                 break;
5367             default:
5368             case 3: /* btc */
5369                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5370                                             s->mem_index, ot | MO_LE);
5371                 break;
5372             }
5373             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5374         } else {
5375             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5376             switch (op) {
5377             case 0: /* bt */
5378                 /* Data already loaded; nothing to do.  */
5379                 break;
5380             case 1: /* bts */
5381                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5382                 break;
5383             case 2: /* btr */
5384                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5385                 break;
5386             default:
5387             case 3: /* btc */
5388                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5389                 break;
5390             }
5391             if (op != 0) {
5392                 if (mod != 3) {
5393                     gen_op_st_v(s, ot, s->T0, s->A0);
5394                 } else {
5395                     gen_op_mov_reg_v(s, ot, rm, s->T0);
5396                 }
5397             }
5398         }
5399 
5400         /* Delay all CC updates until after the store above.  Note that
5401            C is the result of the test, Z is unchanged, and the others
5402            are all undefined.  */
5403         switch (s->cc_op) {
5404         case CC_OP_MULB ... CC_OP_MULQ:
5405         case CC_OP_ADDB ... CC_OP_ADDQ:
5406         case CC_OP_ADCB ... CC_OP_ADCQ:
5407         case CC_OP_SUBB ... CC_OP_SUBQ:
5408         case CC_OP_SBBB ... CC_OP_SBBQ:
5409         case CC_OP_LOGICB ... CC_OP_LOGICQ:
5410         case CC_OP_INCB ... CC_OP_INCQ:
5411         case CC_OP_DECB ... CC_OP_DECQ:
5412         case CC_OP_SHLB ... CC_OP_SHLQ:
5413         case CC_OP_SARB ... CC_OP_SARQ:
5414         case CC_OP_BMILGB ... CC_OP_BMILGQ:
5415             /* Z was going to be computed from the non-zero status of CC_DST.
5416                We can get that same Z value (and the new C value) by leaving
5417                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5418                same width.  */
5419             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5420             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5421             break;
5422         default:
5423             /* Otherwise, generate EFLAGS and replace the C bit.  */
5424             gen_compute_eflags(s);
5425             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5426                                ctz32(CC_C), 1);
5427             break;
5428         }
5429         break;
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);

        /* Note that lzcnt and tzcnt are in different extensions.  */
        /* The REPZ (F3) prefix selects tzcnt/lzcnt when the matching
           CPUID feature is present; otherwise it decodes as bsf/bsr.  */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, s->T0);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top.  */
                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size.  */
                tcg_gen_ctzi_tl(s->T0, s->T0, size);
            }
            /* For lzcnt/tzcnt, Z bit is defined related to the result.  */
            gen_op_update1_cc(s);
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result.  */
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            set_cc_op(s, CC_OP_LOGICB + ot);

            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero.  */
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros.  */
                tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
                tcg_gen_clz_tl(s->T0, s->T0, s->T1);
                tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
            } else {
                tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
            }
        }
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
5480         /************************/
5481         /* bcd */
5482     case 0x27: /* daa */
5483         if (CODE64(s))
5484             goto illegal_op;
5485         gen_update_cc_op(s);
5486         gen_helper_daa(cpu_env);
5487         set_cc_op(s, CC_OP_EFLAGS);
5488         break;
5489     case 0x2f: /* das */
5490         if (CODE64(s))
5491             goto illegal_op;
5492         gen_update_cc_op(s);
5493         gen_helper_das(cpu_env);
5494         set_cc_op(s, CC_OP_EFLAGS);
5495         break;
5496     case 0x37: /* aaa */
5497         if (CODE64(s))
5498             goto illegal_op;
5499         gen_update_cc_op(s);
5500         gen_helper_aaa(cpu_env);
5501         set_cc_op(s, CC_OP_EFLAGS);
5502         break;
5503     case 0x3f: /* aas */
5504         if (CODE64(s))
5505             goto illegal_op;
5506         gen_update_cc_op(s);
5507         gen_helper_aas(cpu_env);
5508         set_cc_op(s, CC_OP_EFLAGS);
5509         break;
5510     case 0xd4: /* aam */
5511         if (CODE64(s))
5512             goto illegal_op;
5513         val = x86_ldub_code(env, s);
5514         if (val == 0) {
5515             gen_exception(s, EXCP00_DIVZ);
5516         } else {
5517             gen_helper_aam(cpu_env, tcg_constant_i32(val));
5518             set_cc_op(s, CC_OP_LOGICB);
5519         }
5520         break;
5521     case 0xd5: /* aad */
5522         if (CODE64(s))
5523             goto illegal_op;
5524         val = x86_ldub_code(env, s);
5525         gen_helper_aad(cpu_env, tcg_constant_i32(val));
5526         set_cc_op(s, CC_OP_LOGICB);
5527         break;
5528         /************************/
5529         /* misc */
5530     case 0x90: /* nop */
5531         /* XXX: correct lock test for all insn */
5532         if (prefixes & PREFIX_LOCK) {
5533             goto illegal_op;
5534         }
5535         /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
5536         if (REX_B(s)) {
5537             goto do_xchg_reg_eax;
5538         }
5539         if (prefixes & PREFIX_REPZ) {
5540             gen_update_cc_op(s);
5541             gen_update_eip_cur(s);
5542             gen_helper_pause(cpu_env, cur_insn_len_i32(s));
5543             s->base.is_jmp = DISAS_NORETURN;
5544         }
5545         break;
5546     case 0x9b: /* fwait */
5547         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5548             (HF_MP_MASK | HF_TS_MASK)) {
5549             gen_exception(s, EXCP07_PREX);
5550         } else {
5551             gen_helper_fwait(cpu_env);
5552         }
5553         break;
5554     case 0xcc: /* int3 */
5555         gen_interrupt(s, EXCP03_INT3);
5556         break;
5557     case 0xcd: /* int N */
5558         val = x86_ldub_code(env, s);
5559         if (check_vm86_iopl(s)) {
5560             gen_interrupt(s, val);
5561         }
5562         break;
5563     case 0xce: /* into */
5564         if (CODE64(s))
5565             goto illegal_op;
5566         gen_update_cc_op(s);
5567         gen_update_eip_cur(s);
5568         gen_helper_into(cpu_env, cur_insn_len_i32(s));
5569         break;
5570 #ifdef WANT_ICEBP
5571     case 0xf1: /* icebp (undocumented, exits to external debugger) */
5572         gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5573         gen_debug(s);
5574         break;
5575 #endif
5576     case 0xfa: /* cli */
5577         if (check_iopl(s)) {
5578             gen_reset_eflags(s, IF_MASK);
5579         }
5580         break;
5581     case 0xfb: /* sti */
5582         if (check_iopl(s)) {
5583             gen_set_eflags(s, IF_MASK);
5584             /* interruptions are enabled only the first insn after sti */
5585             gen_update_eip_next(s);
5586             gen_eob_inhibit_irq(s, true);
5587         }
5588         break;
5589     case 0x62: /* bound */
5590         if (CODE64(s))
5591             goto illegal_op;
5592         ot = dflag;
5593         modrm = x86_ldub_code(env, s);
5594         reg = (modrm >> 3) & 7;
5595         mod = (modrm >> 6) & 3;
5596         if (mod == 3)
5597             goto illegal_op;
5598         gen_op_mov_v_reg(s, ot, s->T0, reg);
5599         gen_lea_modrm(env, s, modrm);
5600         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5601         if (ot == MO_16) {
5602             gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32);
5603         } else {
5604             gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32);
5605         }
5606         break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
            break;
        }
#endif
        /* 32-bit bswap; TCG_BSWAP_OZ zero-extends the result.  */
        tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
        break;
    case 0xd6: /* salc */
        /* Undocumented: AL = CF ? 0xff : 0x00 (via negation of the
           0/1 carry value).  Invalid in 64-bit mode.  */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, s->T0);
        tcg_gen_neg_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            TCGLabel *l1, *l2;
            int diff = (int8_t)insn_get(env, s, MO_8);

            /* l1: branch taken, l2: fall through to the next insn.  */
            l1 = gen_new_label();
            l2 = gen_new_label();
            gen_update_cc_op(s);
            b &= 3;
            switch(b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s, l2);
                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s, l1);
                break;
            default:
            case 3: /* jcxz */
                gen_op_jz_ecx(s, l1);
                break;
            }

            gen_set_label(l2);
            gen_jmp_rel_csize(s, 0, 1);

            gen_set_label(l1);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            if (b & 2) {
                gen_helper_rdmsr(cpu_env);
            } else {
                gen_helper_wrmsr(cpu_env);
                /* A MSR write may change translation-relevant state.  */
                s->base.is_jmp = DISAS_EOB_NEXT;
            }
        }
        break;
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        /* RDTSC interacts with virtual time; mark the TB as doing I/O.  */
        translator_io_start(&s->base);
        gen_helper_rdtsc(cpu_env);
        break;
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_rdpmc(cpu_env);
        s->base.is_jmp = DISAS_NORETURN;
        break;
    case 0x134: /* sysenter */
        /* For AMD SYSENTER is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s)) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysenter(cpu_env);
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x135: /* sysexit */
        /* For AMD SYSEXIT is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x105: /* syscall */
        /* For Intel SYSCALL is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_syscall(cpu_env, cur_insn_len_i32(s));
        /* TF handling for the syscall insn is different. The TF bit is  checked
           after the syscall insn completes. This allows #DB to not be
           generated after one has entered CPL0 if TF is set in FMASK.  */
        gen_eob_worker(s, false, true);
        break;
    case 0x107: /* sysret */
        /* For Intel SYSRET is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
            /* condition codes are modified only in long mode */
            if (LMA(s)) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            /* TF handling for the sysret insn is different. The TF bit is
               checked after the sysret insn completes. This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               completed.  */
            gen_eob_worker(s, false, true);
        }
        break;
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_cpuid(cpu_env);
        break;
    case 0xf4: /* hlt */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_hlt(cpu_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x100:
        /* Group 6: descriptor-table register access, selected by the
           modrm reg field.  All of these require protected mode and
           are invalid in vm86 mode.  */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            /* UMIP restricts descriptor-table reads to CPL 0.  */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, ldt.selector));
            /* To memory the store is always 16-bit; to a register it
               follows the operand size.  */
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(cpu_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(cpu_env, s->tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(cpu_env, s->T0);
            } else {
                gen_helper_verw(cpu_env, s->T0);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto unknown_op;
        }
        break;
5821 
5822     case 0x101:
5823         modrm = x86_ldub_code(env, s);
5824         switch (modrm) {
5825         CASE_MODRM_MEM_OP(0): /* sgdt */
5826             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5827                 break;
5828             }
5829             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5830             gen_lea_modrm(env, s, modrm);
5831             tcg_gen_ld32u_tl(s->T0,
5832                              cpu_env, offsetof(CPUX86State, gdt.limit));
5833             gen_op_st_v(s, MO_16, s->T0, s->A0);
5834             gen_add_A0_im(s, 2);
5835             tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
5836             if (dflag == MO_16) {
5837                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5838             }
5839             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5840             break;
5841 
5842         case 0xc8: /* monitor */
5843             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5844                 goto illegal_op;
5845             }
5846             gen_update_cc_op(s);
5847             gen_update_eip_cur(s);
5848             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5849             gen_extu(s->aflag, s->A0);
5850             gen_add_A0_ds_seg(s);
5851             gen_helper_monitor(cpu_env, s->A0);
5852             break;
5853 
5854         case 0xc9: /* mwait */
5855             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5856                 goto illegal_op;
5857             }
5858             gen_update_cc_op(s);
5859             gen_update_eip_cur(s);
5860             gen_helper_mwait(cpu_env, cur_insn_len_i32(s));
5861             s->base.is_jmp = DISAS_NORETURN;
5862             break;
5863 
5864         case 0xca: /* clac */
5865             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5866                 || CPL(s) != 0) {
5867                 goto illegal_op;
5868             }
5869             gen_reset_eflags(s, AC_MASK);
5870             s->base.is_jmp = DISAS_EOB_NEXT;
5871             break;
5872 
5873         case 0xcb: /* stac */
5874             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5875                 || CPL(s) != 0) {
5876                 goto illegal_op;
5877             }
5878             gen_set_eflags(s, AC_MASK);
5879             s->base.is_jmp = DISAS_EOB_NEXT;
5880             break;
5881 
5882         CASE_MODRM_MEM_OP(1): /* sidt */
5883             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5884                 break;
5885             }
5886             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5887             gen_lea_modrm(env, s, modrm);
5888             tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
5889             gen_op_st_v(s, MO_16, s->T0, s->A0);
5890             gen_add_A0_im(s, 2);
5891             tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
5892             if (dflag == MO_16) {
5893                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5894             }
5895             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5896             break;
5897 
5898         case 0xd0: /* xgetbv */
5899             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5900                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5901                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5902                 goto illegal_op;
5903             }
5904             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5905             gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
5906             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5907             break;
5908 
5909         case 0xd1: /* xsetbv */
5910             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5911                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5912                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5913                 goto illegal_op;
5914             }
5915             if (!check_cpl0(s)) {
5916                 break;
5917             }
5918             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5919                                   cpu_regs[R_EDX]);
5920             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5921             gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
5922             /* End TB because translation flags may change.  */
5923             s->base.is_jmp = DISAS_EOB_NEXT;
5924             break;
5925 
5926         case 0xd8: /* VMRUN */
5927             if (!SVME(s) || !PE(s)) {
5928                 goto illegal_op;
5929             }
5930             if (!check_cpl0(s)) {
5931                 break;
5932             }
5933             gen_update_cc_op(s);
5934             gen_update_eip_cur(s);
5935             gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
5936                              cur_insn_len_i32(s));
5937             tcg_gen_exit_tb(NULL, 0);
5938             s->base.is_jmp = DISAS_NORETURN;
5939             break;
5940 
5941         case 0xd9: /* VMMCALL */
5942             if (!SVME(s)) {
5943                 goto illegal_op;
5944             }
5945             gen_update_cc_op(s);
5946             gen_update_eip_cur(s);
5947             gen_helper_vmmcall(cpu_env);
5948             break;
5949 
5950         case 0xda: /* VMLOAD */
5951             if (!SVME(s) || !PE(s)) {
5952                 goto illegal_op;
5953             }
5954             if (!check_cpl0(s)) {
5955                 break;
5956             }
5957             gen_update_cc_op(s);
5958             gen_update_eip_cur(s);
5959             gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
5960             break;
5961 
5962         case 0xdb: /* VMSAVE */
5963             if (!SVME(s) || !PE(s)) {
5964                 goto illegal_op;
5965             }
5966             if (!check_cpl0(s)) {
5967                 break;
5968             }
5969             gen_update_cc_op(s);
5970             gen_update_eip_cur(s);
5971             gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
5972             break;
5973 
5974         case 0xdc: /* STGI */
5975             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5976                 || !PE(s)) {
5977                 goto illegal_op;
5978             }
5979             if (!check_cpl0(s)) {
5980                 break;
5981             }
5982             gen_update_cc_op(s);
5983             gen_helper_stgi(cpu_env);
5984             s->base.is_jmp = DISAS_EOB_NEXT;
5985             break;
5986 
5987         case 0xdd: /* CLGI */
5988             if (!SVME(s) || !PE(s)) {
5989                 goto illegal_op;
5990             }
5991             if (!check_cpl0(s)) {
5992                 break;
5993             }
5994             gen_update_cc_op(s);
5995             gen_update_eip_cur(s);
5996             gen_helper_clgi(cpu_env);
5997             break;
5998 
5999         case 0xde: /* SKINIT */
6000             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
6001                 || !PE(s)) {
6002                 goto illegal_op;
6003             }
6004             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
6005             /* If not intercepted, not implemented -- raise #UD. */
6006             goto illegal_op;
6007 
6008         case 0xdf: /* INVLPGA */
6009             if (!SVME(s) || !PE(s)) {
6010                 goto illegal_op;
6011             }
6012             if (!check_cpl0(s)) {
6013                 break;
6014             }
6015             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6016             if (s->aflag == MO_64) {
6017                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6018             } else {
6019                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6020             }
6021             gen_helper_flush_page(cpu_env, s->A0);
6022             s->base.is_jmp = DISAS_EOB_NEXT;
6023             break;
6024 
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            /* Descriptor-table operand: 16-bit limit, then the base at +2
               (8 bytes wide in 64-bit mode, 4 otherwise). */
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                /* 16-bit operand size keeps only 24 bits of the base. */
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            /* Same layout as lgdt above, targeting the IDT. */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
            break;
6056 
        CASE_MODRM_OP(4): /* smsw */
            /* With UMIP set, SMSW is privileged. */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            /* Memory destinations always take a 16-bit store. */
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
            /* Result is delivered in EDX:EAX. */
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            /* New PKRU value is taken from EDX:EAX. */
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
            break;
6089 
        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            /* Keep the old PE bit (bit 0) so LMSW cannot clear it. */
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
            /* CR0 affects translation; end the TB. */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(env, s, modrm);
            gen_helper_flush_page(cpu_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
6117 
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    /* Exchange the GS base with MSR_KERNEL_GS_BASE. */
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            /* SWAPGS exists only in 64-bit mode. */
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtsc(cpu_env);
            /* RDTSCP additionally returns the processor id in ECX. */
            gen_helper_rdpid(s->T0, cpu_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;
6144 
6145         default:
6146             goto unknown_op;
6147         }
6148         break;
6149 
    case 0x108: /* invd */
    case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
            /* No caches are emulated, so there is nothing to do. */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            /* In 64-bit mode 0x63 is MOVSXD: sign-extend r/m32 into reg. */
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(s, MO_32, s->T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(s->T0, s->T0);
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        } else
#endif
        {
            /* ARPL: if the destination selector's RPL is below the source's,
               raise it to match and set ZF; otherwise clear ZF.  Only valid
               in protected mode outside vm86. */
            TCGLabel *label1;
            TCGv t0, t1, t2;

            if (!PE(s) || VM86(s))
                goto illegal_op;
            t0 = tcg_temp_new();
            t1 = tcg_temp_new();
            t2 = tcg_temp_new();
            ot = MO_16;
            modrm = x86_ldub_code(env, s);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_v_reg(s, ot, t0, rm);
            }
            gen_op_mov_v_reg(s, ot, t1, reg);
            /* Compare the RPL fields (low 2 bits of each selector). */
            tcg_gen_andi_tl(s->tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            /* t2 accumulates the ZF value to merge into eflags below. */
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
            /* dest RPL < src RPL: substitute the new RPL and record ZF. */
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, s->A0);
           } else {
                gen_op_mov_reg_v(s, ot, rm, t0);
            }
            /* Materialize eflags, then splice in the computed ZF. */
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!PE(s) || VM86(s))
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, s->T0);
            } else {
                gen_helper_lsl(t0, cpu_env, s->T0);
            }
            /* The helpers set ZF on success; write the destination
               register only in that case. */
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
        }
        break;
    case 0x118:
        /* 0F 18: prefetch hints (/0../3) and reserved-NOP space. */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            /* Prefetches are hints only; just consume the operand. */
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        /* MPX: bndcl / bndcu / bndmov / bndldx.  When MPX is not
           enabled the whole opcode behaves as a NOP (final
           gen_nop_modrm below). */
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                /* The upper bound is stored 1's-complemented; undo that
                   before the comparison. */
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    /* Register copy only has effect while bounds are
                       in-use. */
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    /* Memory layout: lower bound then upper bound,
                       8 bytes each in 64-bit mode, 4 otherwise. */
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                /* The index register is passed separately to the helper. */
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    /* 32-bit helper packs both bounds in one i64;
                       split them here. */
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        /* MPX: bndmk / bndcn / bndmov / bndstx.  NOP when MPX is not
           enabled (final gen_nop_modrm below). */
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                /* Lower bound comes from the base register. */
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                /* Upper bound is the effective address, stored
                   1's-complemented. */
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    /* Register copy only has effect while bounds are
                       in-use. */
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    /* Store lower then upper bound, mirroring the load
                       layout in the 0x1a handler. */
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                /* The index register is passed separately to the helper. */
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        /* Reserved-NOP space: decode the modrm operand and discard it. */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;
6463 
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (!check_cpl0(s)) {
            break;
        }
        modrm = x86_ldub_code(env, s);
        /*
         * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of Intel 386 and 486
         * processors all show that the mod bits are assumed to be 1's,
         * regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        switch (reg) {
        case 0:
            /* LOCK-prefixed CR0 access selects CR8 when the CPU
               advertises CR8_LEGACY. */
            if ((prefixes & PREFIX_LOCK) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            break;
        case 2:
        case 3:
        case 4:
        case 8:
            break;
        default:
            goto unknown_op;
        }
        ot  = (CODE64(s) ? MO_64 : MO_32);

        translator_io_start(&s->base);
        if (b & 2) {
            /* mov crN, reg */
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
            /* Control register writes can change translation state. */
            s->base.is_jmp = DISAS_EOB_NEXT;
        } else {
            /* mov reg, crN */
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
            gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        }
        break;
6507 
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (check_cpl0(s)) {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            /* Only DR0..DR7 exist. */
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                /* mov drN, reg */
                gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
                s->base.is_jmp = DISAS_EOB_NEXT;
            } else {
                /* mov reg, drN */
                gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        /* Clear the TS flag in CR0; privileged. */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        /* MOVNTI only takes a memory destination. */
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        /* The non-temporal hint is not modelled; emit a generic store. */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        /* 0F AE: group 15 -- fxsave/fxrstor, ldmxcsr/stmxcsr,
           xsave/xrstor/xsaveopt, clwb, clflush(opt), fsgsbase and
           the fence instructions. */
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            /* #NM when FPU emulation or task-switched is set. */
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            /* Refresh the cached mxcsr value before storing it. */
            gen_helper_update_mxcsr(cpu_env);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;

        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            /* Requested-feature bitmap is EDX:EAX. */
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB.  */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                /* Cache write-back has no effect here; decode only. */
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                /* NOTE(review): reads s->prefix here while the sibling
                   checks use the local "prefixes"; the two should hold
                   the same value at this point -- confirm and unify. */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;

        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);

                /* modrm bit 3 selects GS vs FS; bit 4 selects wr vs rd. */
                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;

        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            /* Store-store barrier. */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            /* Load-load barrier. */
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            /* Full barrier. */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;
6757 
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        /* Prefetch hints only need their operand decoded. */
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        /* Return from System Management Mode. */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_update_cc_op(s);
        gen_update_eip_next(s);
        gen_helper_rsm(cpu_env);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
6779     case 0x1b8: /* SSE4.2 popcnt */
6780         if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
6781              PREFIX_REPZ)
6782             goto illegal_op;
6783         if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
6784             goto illegal_op;
6785 
6786         modrm = x86_ldub_code(env, s);
6787         reg = ((modrm >> 3) & 7) | REX_R(s);
6788 
6789         if (s->prefix & PREFIX_DATA) {
6790             ot = MO_16;
6791         } else {
6792             ot = mo_64_32(dflag);
6793         }
6794 
6795         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6796         gen_extu(ot, s->T0);
6797         tcg_gen_mov_tl(cpu_cc_src, s->T0);
6798         tcg_gen_ctpop_tl(s->T0, s->T0);
6799         gen_op_mov_reg_v(s, ot, reg, s->T0);
6800 
6801         set_cc_op(s, CC_OP_POPCNT);
6802         break;
6803     case 0x10e ... 0x117:
6804     case 0x128 ... 0x12f:
6805     case 0x138 ... 0x13a:
6806     case 0x150 ... 0x179:
6807     case 0x17c ... 0x17f:
6808     case 0x1c2:
6809     case 0x1c4 ... 0x1c6:
6810     case 0x1d0 ... 0x1fe:
6811         disas_insn_new(s, cpu, b);
6812         break;
6813     default:
6814         goto unknown_op;
6815     }
6816     return true;
6817  illegal_op:
6818     gen_illegal_opcode(s);
6819     return true;
6820  unknown_op:
6821     gen_unknown_opcode(env, s);
6822     return true;
6823 }
6824 
/*
 * Allocate the TCG globals that mirror the guest CPU state: the lazy
 * condition-code working set (cc_op/cc_dst/cc_src/cc_src2), the
 * instruction pointer, the general-purpose registers, the segment base
 * addresses and the MPX bound registers.  Called once at accelerator
 * initialization, before any code is translated; every generated TB
 * refers to these TCGv handles.
 */
void tcg_x86_init(void)
{
    /* Debug names for the GPR globals, indexed by the R_* register numbers. */
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    /* Debug names for the cached segment base addresses, indexed by R_*. */
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    /* MPX bound registers: lower and upper bound halves, one pair each. */
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    /* Condition-code state used by the lazy flags evaluation. */
    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name);

    /* One TCG global per architectural general-purpose register. */
    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    /* Segment base addresses, used for effective-address computation. */
    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    /* MPX bound registers are always 64-bit, hence the _i64 variants. */
    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
6913 
/*
 * Per-TB translator setup: unpack the TB flags into the DisasContext,
 * cache the CPUID feature words the decoder consults, and allocate the
 * TCG temporaries shared by all instructions of this TB.
 */
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu->env_ptr;
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    /* In user mode CPL/IOPL are fixed, so the fields only exist otherwise. */
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    /* Flags state is unknown at TB entry; recomputed lazily as we decode. */
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(env, false);
    /* Cache the CPUID feature words the decoder tests at translate time. */
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    /* Direct TB chaining is disabled when single-stepping or IRQs are held. */
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    /* TCG temporaries reused by every instruction translated in this TB. */
    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}
6976 
/*
 * tb_start hook: nothing to do for x86 — all per-TB state was already
 * set up in i386_tr_init_disas_context().
 */
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
6980 
6981 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6982 {
6983     DisasContext *dc = container_of(dcbase, DisasContext, base);
6984     target_ulong pc_arg = dc->base.pc_next;
6985 
6986     dc->prev_insn_end = tcg_last_op();
6987     if (tb_cflags(dcbase->tb) & CF_PCREL) {
6988         pc_arg -= dc->cs_base;
6989         pc_arg &= ~TARGET_PAGE_MASK;
6990     }
6991     tcg_gen_insn_start(pc_arg, dc->cc_op);
6992 }
6993 
6994 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6995 {
6996     DisasContext *dc = container_of(dcbase, DisasContext, base);
6997 
6998 #ifdef TARGET_VSYSCALL_PAGE
6999     /*
7000      * Detect entry into the vsyscall page and invoke the syscall.
7001      */
7002     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
7003         gen_exception(dc, EXCP_VSYSCALL);
7004         dc->base.pc_next = dc->pc + 1;
7005         return;
7006     }
7007 #endif
7008 
7009     if (disas_insn(dc, cpu)) {
7010         target_ulong pc_next = dc->pc;
7011         dc->base.pc_next = pc_next;
7012 
7013         if (dc->base.is_jmp == DISAS_NEXT) {
7014             if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
7015                 /*
7016                  * If single step mode, we generate only one instruction and
7017                  * generate an exception.
7018                  * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7019                  * the flag and abort the translation to give the irqs a
7020                  * chance to happen.
7021                  */
7022                 dc->base.is_jmp = DISAS_EOB_NEXT;
7023             } else if (!is_same_page(&dc->base, pc_next)) {
7024                 dc->base.is_jmp = DISAS_TOO_MANY;
7025             }
7026         }
7027     }
7028 }
7029 
7030 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7031 {
7032     DisasContext *dc = container_of(dcbase, DisasContext, base);
7033 
7034     switch (dc->base.is_jmp) {
7035     case DISAS_NORETURN:
7036         break;
7037     case DISAS_TOO_MANY:
7038         gen_update_cc_op(dc);
7039         gen_jmp_rel_csize(dc, 0, 0);
7040         break;
7041     case DISAS_EOB_NEXT:
7042         gen_update_cc_op(dc);
7043         gen_update_eip_cur(dc);
7044         /* fall through */
7045     case DISAS_EOB_ONLY:
7046         gen_eob(dc);
7047         break;
7048     case DISAS_EOB_INHIBIT_IRQ:
7049         gen_update_cc_op(dc);
7050         gen_update_eip_cur(dc);
7051         gen_eob_inhibit_irq(dc, true);
7052         break;
7053     case DISAS_JUMP:
7054         gen_jr(dc);
7055         break;
7056     default:
7057         g_assert_not_reached();
7058     }
7059 }
7060 
7061 static void i386_tr_disas_log(const DisasContextBase *dcbase,
7062                               CPUState *cpu, FILE *logfile)
7063 {
7064     DisasContext *dc = container_of(dcbase, DisasContext, base);
7065 
7066     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7067     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7068 }
7069 
/*
 * Callbacks handed to the generic translator loop; see TranslatorOps
 * for when each hook is invoked.
 */
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};
7078 
7079 /* generate intermediate code for basic block 'tb'.  */
7080 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7081                            target_ulong pc, void *host_pc)
7082 {
7083     DisasContext dc;
7084 
7085     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7086 }
7087