xref: /qemu/target/i386/tcg/translate.c (revision 02326733)
1 /*
2  *  i386 translation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
29 #include "fpu/softfloat.h"
30 
31 #include "exec/helper-proto.h"
32 #include "exec/helper-gen.h"
33 #include "helper-tcg.h"
34 
35 #include "exec/log.h"
36 
37 #define HELPER_H "helper.h"
38 #include "exec/helper-info.c.inc"
39 #undef  HELPER_H
40 
41 
42 #define PREFIX_REPZ   0x01
43 #define PREFIX_REPNZ  0x02
44 #define PREFIX_LOCK   0x04
45 #define PREFIX_DATA   0x08
46 #define PREFIX_ADR    0x10
47 #define PREFIX_VEX    0x20
48 #define PREFIX_REX    0x40
49 
50 #ifdef TARGET_X86_64
51 # define ctztl  ctz64
52 # define clztl  clz64
53 #else
54 # define ctztl  ctz32
55 # define clztl  clz32
56 #endif
57 
58 /* For a switch indexed by MODRM, match all memory operands for a given OP.  */
59 #define CASE_MODRM_MEM_OP(OP) \
60     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
61     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
62     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
63 
64 #define CASE_MODRM_OP(OP) \
65     case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
66     case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
67     case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
68     case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
69 
70 //#define MACRO_TEST   1
71 
72 /* global register indexes */
73 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
74 static TCGv cpu_eip;
75 static TCGv_i32 cpu_cc_op;
76 static TCGv cpu_regs[CPU_NB_REGS];
77 static TCGv cpu_seg_base[6];
78 static TCGv_i64 cpu_bndl[4];
79 static TCGv_i64 cpu_bndu[4];
80 
81 typedef struct DisasContext {
82     DisasContextBase base;
83 
84     target_ulong pc;       /* pc = eip + cs_base */
85     target_ulong cs_base;  /* base of CS segment */
86     target_ulong pc_save;
87 
88     MemOp aflag;
89     MemOp dflag;
90 
91     int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
92     uint8_t prefix;
93 
94     bool has_modrm;
95     uint8_t modrm;
96 
97 #ifndef CONFIG_USER_ONLY
98     uint8_t cpl;   /* code priv level */
99     uint8_t iopl;  /* i/o priv level */
100 #endif
101     uint8_t vex_l;  /* vex vector length */
102     uint8_t vex_v;  /* vex vvvv register, without 1's complement.  */
103     uint8_t popl_esp_hack; /* for correct popl with esp base handling */
104     uint8_t rip_offset; /* only used in x86_64, but left for simplicity */
105 
106 #ifdef TARGET_X86_64
107     uint8_t rex_r;
108     uint8_t rex_x;
109     uint8_t rex_b;
110 #endif
111     bool vex_w; /* used by AVX even on 32-bit processors */
112     bool jmp_opt; /* use direct block chaining for direct jumps */
113     bool repz_opt; /* optimize jumps within repz instructions */
114     bool cc_op_dirty;
115 
116     CCOp cc_op;  /* current CC operation */
117     int mem_index; /* select memory access functions */
118     uint32_t flags; /* all execution flags */
119     int cpuid_features;
120     int cpuid_ext_features;
121     int cpuid_ext2_features;
122     int cpuid_ext3_features;
123     int cpuid_7_0_ebx_features;
124     int cpuid_7_0_ecx_features;
125     int cpuid_7_1_eax_features;
126     int cpuid_xsave_features;
127 
128     /* TCG local temps */
129     TCGv cc_srcT;
130     TCGv A0;
131     TCGv T0;
132     TCGv T1;
133 
134     /* TCG local register indexes (only used inside old micro ops) */
135     TCGv tmp0;
136     TCGv tmp4;
137     TCGv_i32 tmp2_i32;
138     TCGv_i32 tmp3_i32;
139     TCGv_i64 tmp1_i64;
140 
141     sigjmp_buf jmpbuf;
142     TCGOp *prev_insn_start;
143     TCGOp *prev_insn_end;
144 } DisasContext;
145 
146 #define DISAS_EOB_ONLY         DISAS_TARGET_0
147 #define DISAS_EOB_NEXT         DISAS_TARGET_1
148 #define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
149 #define DISAS_JUMP             DISAS_TARGET_3
150 
151 /* The environment in which user-only runs is constrained. */
152 #ifdef CONFIG_USER_ONLY
153 #define PE(S)     true
154 #define CPL(S)    3
155 #define IOPL(S)   0
156 #define SVME(S)   false
157 #define GUEST(S)  false
158 #else
159 #define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
160 #define CPL(S)    ((S)->cpl)
161 #define IOPL(S)   ((S)->iopl)
162 #define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
163 #define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
164 #endif
165 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
166 #define VM86(S)   false
167 #define CODE32(S) true
168 #define SS32(S)   true
169 #define ADDSEG(S) false
170 #else
171 #define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
172 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
173 #define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
174 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
175 #endif
176 #if !defined(TARGET_X86_64)
177 #define CODE64(S) false
178 #elif defined(CONFIG_USER_ONLY)
179 #define CODE64(S) true
180 #else
181 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
182 #endif
183 #if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
184 #define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
185 #else
186 #define LMA(S)    false
187 #endif
188 
189 #ifdef TARGET_X86_64
190 #define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
191 #define REX_W(S)       ((S)->vex_w)
192 #define REX_R(S)       ((S)->rex_r + 0)
193 #define REX_X(S)       ((S)->rex_x + 0)
194 #define REX_B(S)       ((S)->rex_b + 0)
195 #else
196 #define REX_PREFIX(S)  false
197 #define REX_W(S)       false
198 #define REX_R(S)       0
199 #define REX_X(S)       0
200 #define REX_B(S)       0
201 #endif
202 
203 /*
204  * Many sysemu-only helpers are not reachable for user-only.
205  * Define stub generators here, so that we need not either sprinkle
206  * ifdefs through the translator, nor provide the helper function.
207  */
208 #define STUB_HELPER(NAME, ...) \
209     static inline void gen_helper_##NAME(__VA_ARGS__) \
210     { qemu_build_not_reached(); }
211 
212 #ifdef CONFIG_USER_ONLY
213 STUB_HELPER(clgi, TCGv_env env)
214 STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
215 STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
216 STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
217 STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
218 STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
219 STUB_HELPER(monitor, TCGv_env env, TCGv addr)
220 STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
221 STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
222 STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
223 STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
224 STUB_HELPER(rdmsr, TCGv_env env)
225 STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
226 STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
227 STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
228 STUB_HELPER(stgi, TCGv_env env)
229 STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
230 STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
231 STUB_HELPER(vmmcall, TCGv_env env)
232 STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
233 STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
234 STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
235 STUB_HELPER(wrmsr, TCGv_env env)
236 #endif
237 
238 static void gen_eob(DisasContext *s);
239 static void gen_jr(DisasContext *s);
240 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
241 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
242 static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
243 static void gen_exception_gpf(DisasContext *s);
244 
245 /* i386 arith/logic operations */
246 enum {
247     OP_ADDL,
248     OP_ORL,
249     OP_ADCL,
250     OP_SBBL,
251     OP_ANDL,
252     OP_SUBL,
253     OP_XORL,
254     OP_CMPL,
255 };
256 
257 /* i386 shift ops */
258 enum {
259     OP_ROL,
260     OP_ROR,
261     OP_RCL,
262     OP_RCR,
263     OP_SHL,
264     OP_SHR,
265     OP_SHL1, /* undocumented */
266     OP_SAR = 7,
267 };
268 
269 enum {
270     JCC_O,
271     JCC_B,
272     JCC_Z,
273     JCC_BE,
274     JCC_S,
275     JCC_P,
276     JCC_L,
277     JCC_LE,
278 };
279 
280 enum {
281     /* I386 int registers */
282     OR_EAX,   /* MUST be even numbered */
283     OR_ECX,
284     OR_EDX,
285     OR_EBX,
286     OR_ESP,
287     OR_EBP,
288     OR_ESI,
289     OR_EDI,
290 
291     OR_TMP0 = 16,    /* temporary operand register */
292     OR_TMP1,
293     OR_A0, /* temporary register used when doing address evaluation */
294 };
295 
296 enum {
297     USES_CC_DST  = 1,
298     USES_CC_SRC  = 2,
299     USES_CC_SRC2 = 4,
300     USES_CC_SRCT = 8,
301 };
302 
303 /* Bit set if the global variable is live after setting CC_OP to X.  */
304 static const uint8_t cc_op_live[CC_OP_NB] = {
305     [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
306     [CC_OP_EFLAGS] = USES_CC_SRC,
307     [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
308     [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
309     [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
310     [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
311     [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
312     [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
313     [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
314     [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
315     [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
316     [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
317     [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
318     [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
319     [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
320     [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
321     [CC_OP_CLR] = 0,
322     [CC_OP_POPCNT] = USES_CC_SRC,
323 };
324 
325 static void set_cc_op(DisasContext *s, CCOp op)
326 {
327     int dead;
328 
329     if (s->cc_op == op) {
330         return;
331     }
332 
333     /* Discard CC computation that will no longer be used.  */
334     dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
335     if (dead & USES_CC_DST) {
336         tcg_gen_discard_tl(cpu_cc_dst);
337     }
338     if (dead & USES_CC_SRC) {
339         tcg_gen_discard_tl(cpu_cc_src);
340     }
341     if (dead & USES_CC_SRC2) {
342         tcg_gen_discard_tl(cpu_cc_src2);
343     }
344     if (dead & USES_CC_SRCT) {
345         tcg_gen_discard_tl(s->cc_srcT);
346     }
347 
348     if (op == CC_OP_DYNAMIC) {
349         /* The DYNAMIC setting is translator only, and should never be
350            stored.  Thus we always consider it clean.  */
351         s->cc_op_dirty = false;
352     } else {
353         /* Discard any computed CC_OP value (see shifts).  */
354         if (s->cc_op == CC_OP_DYNAMIC) {
355             tcg_gen_discard_i32(cpu_cc_op);
356         }
357         s->cc_op_dirty = true;
358     }
359     s->cc_op = op;
360 }
361 
362 static void gen_update_cc_op(DisasContext *s)
363 {
364     if (s->cc_op_dirty) {
365         tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
366         s->cc_op_dirty = false;
367     }
368 }
369 
370 #ifdef TARGET_X86_64
371 
372 #define NB_OP_SIZES 4
373 
374 #else /* !TARGET_X86_64 */
375 
376 #define NB_OP_SIZES 3
377 
378 #endif /* !TARGET_X86_64 */
379 
380 #if HOST_BIG_ENDIAN
381 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
382 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
383 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
384 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
385 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
386 #else
387 #define REG_B_OFFSET 0
388 #define REG_H_OFFSET 1
389 #define REG_W_OFFSET 0
390 #define REG_L_OFFSET 0
391 #define REG_LH_OFFSET 4
392 #endif
393 
394 /* In instruction encodings for byte register accesses the
395  * register number usually indicates "low 8 bits of register N";
396  * however there are some special cases where N 4..7 indicates
397  * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
398  * true for this special case, false otherwise.
399  */
400 static inline bool byte_reg_is_xH(DisasContext *s, int reg)
401 {
402     /* Any time the REX prefix is present, byte registers are uniform */
403     if (reg < 4 || REX_PREFIX(s)) {
404         return false;
405     }
406     return true;
407 }
408 
409 /* Select the size of a push/pop operation.  */
410 static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
411 {
412     if (CODE64(s)) {
413         return ot == MO_16 ? MO_16 : MO_64;
414     } else {
415         return ot;
416     }
417 }
418 
419 /* Select the size of the stack pointer.  */
420 static inline MemOp mo_stacksize(DisasContext *s)
421 {
422     return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
423 }
424 
425 /* Select only size 64 else 32.  Used for SSE operand sizes.  */
426 static inline MemOp mo_64_32(MemOp ot)
427 {
428 #ifdef TARGET_X86_64
429     return ot == MO_64 ? MO_64 : MO_32;
430 #else
431     return MO_32;
432 #endif
433 }
434 
435 /* Select size 8 if lsb of B is clear, else OT.  Used for decoding
436    byte vs word opcodes.  */
437 static inline MemOp mo_b_d(int b, MemOp ot)
438 {
439     return b & 1 ? ot : MO_8;
440 }
441 
442 /* Select size 8 if lsb of B is clear, else OT capped at 32.
443    Used for decoding operand size of port opcodes.  */
444 static inline MemOp mo_b_d32(int b, MemOp ot)
445 {
446     return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
447 }
448 
449 /* Compute the result of writing t0 to the OT-sized register REG.
450  *
451  * If DEST is NULL, store the result into the register and return the
452  * register's TCGv.
453  *
454  * If DEST is not NULL, store the result into DEST and return the
455  * register's TCGv.
456  */
457 static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
458 {
459     switch(ot) {
460     case MO_8:
461         if (byte_reg_is_xH(s, reg)) {
462             dest = dest ? dest : cpu_regs[reg - 4];
463             tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
464             return cpu_regs[reg - 4];
465         }
466         dest = dest ? dest : cpu_regs[reg];
467         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
468         break;
469     case MO_16:
470         dest = dest ? dest : cpu_regs[reg];
471         tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
472         break;
473     case MO_32:
474         /* For x86_64, this sets the higher half of register to zero.
475            For i386, this is equivalent to a mov. */
476         dest = dest ? dest : cpu_regs[reg];
477         tcg_gen_ext32u_tl(dest, t0);
478         break;
479 #ifdef TARGET_X86_64
480     case MO_64:
481         dest = dest ? dest : cpu_regs[reg];
482         tcg_gen_mov_tl(dest, t0);
483         break;
484 #endif
485     default:
486         g_assert_not_reached();
487     }
488     return cpu_regs[reg];
489 }
490 
491 static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
492 {
493     gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
494 }
495 
496 static inline
497 void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
498 {
499     if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
500         tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
501     } else {
502         tcg_gen_mov_tl(t0, cpu_regs[reg]);
503     }
504 }
505 
506 static void gen_add_A0_im(DisasContext *s, int val)
507 {
508     tcg_gen_addi_tl(s->A0, s->A0, val);
509     if (!CODE64(s)) {
510         tcg_gen_ext32u_tl(s->A0, s->A0);
511     }
512 }
513 
514 static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
515 {
516     tcg_gen_mov_tl(cpu_eip, dest);
517     s->pc_save = -1;
518 }
519 
520 static inline
521 void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
522 {
523     tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
524     gen_op_mov_reg_v(s, size, reg, s->tmp0);
525 }
526 
527 static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
528 {
529     tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
530     gen_op_mov_reg_v(s, size, reg, s->tmp0);
531 }
532 
533 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
534 {
535     tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
536 }
537 
538 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
539 {
540     tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
541 }
542 
543 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
544 {
545     if (d == OR_TMP0) {
546         gen_op_st_v(s, idx, s->T0, s->A0);
547     } else {
548         gen_op_mov_reg_v(s, idx, d, s->T0);
549     }
550 }
551 
552 static void gen_update_eip_cur(DisasContext *s)
553 {
554     assert(s->pc_save != -1);
555     if (tb_cflags(s->base.tb) & CF_PCREL) {
556         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
557     } else if (CODE64(s)) {
558         tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
559     } else {
560         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
561     }
562     s->pc_save = s->base.pc_next;
563 }
564 
565 static void gen_update_eip_next(DisasContext *s)
566 {
567     assert(s->pc_save != -1);
568     if (tb_cflags(s->base.tb) & CF_PCREL) {
569         tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
570     } else if (CODE64(s)) {
571         tcg_gen_movi_tl(cpu_eip, s->pc);
572     } else {
573         tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
574     }
575     s->pc_save = s->pc;
576 }
577 
578 static int cur_insn_len(DisasContext *s)
579 {
580     return s->pc - s->base.pc_next;
581 }
582 
583 static TCGv_i32 cur_insn_len_i32(DisasContext *s)
584 {
585     return tcg_constant_i32(cur_insn_len(s));
586 }
587 
588 static TCGv_i32 eip_next_i32(DisasContext *s)
589 {
590     assert(s->pc_save != -1);
591     /*
592      * This function has two users: lcall_real (always 16-bit mode), and
593      * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
594      * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
595      * why passing a 32-bit value isn't broken.  To avoid using this where
596      * we shouldn't, return -1 in 64-bit mode so that execution goes into
597      * the weeds quickly.
598      */
599     if (CODE64(s)) {
600         return tcg_constant_i32(-1);
601     }
602     if (tb_cflags(s->base.tb) & CF_PCREL) {
603         TCGv_i32 ret = tcg_temp_new_i32();
604         tcg_gen_trunc_tl_i32(ret, cpu_eip);
605         tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
606         return ret;
607     } else {
608         return tcg_constant_i32(s->pc - s->cs_base);
609     }
610 }
611 
612 static TCGv eip_next_tl(DisasContext *s)
613 {
614     assert(s->pc_save != -1);
615     if (tb_cflags(s->base.tb) & CF_PCREL) {
616         TCGv ret = tcg_temp_new();
617         tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
618         return ret;
619     } else if (CODE64(s)) {
620         return tcg_constant_tl(s->pc);
621     } else {
622         return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
623     }
624 }
625 
626 static TCGv eip_cur_tl(DisasContext *s)
627 {
628     assert(s->pc_save != -1);
629     if (tb_cflags(s->base.tb) & CF_PCREL) {
630         TCGv ret = tcg_temp_new();
631         tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
632         return ret;
633     } else if (CODE64(s)) {
634         return tcg_constant_tl(s->base.pc_next);
635     } else {
636         return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
637     }
638 }
639 
640 /* Compute SEG:REG into DEST.  SEG is selected from the override segment
641    (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
642    indicate no override.  */
643 static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
644                                int def_seg, int ovr_seg)
645 {
646     switch (aflag) {
647 #ifdef TARGET_X86_64
648     case MO_64:
649         if (ovr_seg < 0) {
650             tcg_gen_mov_tl(dest, a0);
651             return;
652         }
653         break;
654 #endif
655     case MO_32:
656         /* 32 bit address */
657         if (ovr_seg < 0 && ADDSEG(s)) {
658             ovr_seg = def_seg;
659         }
660         if (ovr_seg < 0) {
661             tcg_gen_ext32u_tl(dest, a0);
662             return;
663         }
664         break;
665     case MO_16:
666         /* 16 bit address */
667         tcg_gen_ext16u_tl(dest, a0);
668         a0 = dest;
669         if (ovr_seg < 0) {
670             if (ADDSEG(s)) {
671                 ovr_seg = def_seg;
672             } else {
673                 return;
674             }
675         }
676         break;
677     default:
678         g_assert_not_reached();
679     }
680 
681     if (ovr_seg >= 0) {
682         TCGv seg = cpu_seg_base[ovr_seg];
683 
684         if (aflag == MO_64) {
685             tcg_gen_add_tl(dest, a0, seg);
686         } else if (CODE64(s)) {
687             tcg_gen_ext32u_tl(dest, a0);
688             tcg_gen_add_tl(dest, dest, seg);
689         } else {
690             tcg_gen_add_tl(dest, a0, seg);
691             tcg_gen_ext32u_tl(dest, dest);
692         }
693     }
694 }
695 
696 static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
697                           int def_seg, int ovr_seg)
698 {
699     gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
700 }
701 
702 static inline void gen_string_movl_A0_ESI(DisasContext *s)
703 {
704     gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
705 }
706 
707 static inline void gen_string_movl_A0_EDI(DisasContext *s)
708 {
709     gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
710 }
711 
712 static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
713 {
714     TCGv dshift = tcg_temp_new();
715     tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
716     tcg_gen_shli_tl(dshift, dshift, ot);
717     return dshift;
718 };
719 
720 static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
721 {
722     if (size == MO_TL) {
723         return src;
724     }
725     if (!dst) {
726         dst = tcg_temp_new();
727     }
728     tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
729     return dst;
730 }
731 
732 static void gen_extu(MemOp ot, TCGv reg)
733 {
734     gen_ext_tl(reg, reg, ot, false);
735 }
736 
737 static void gen_exts(MemOp ot, TCGv reg)
738 {
739     gen_ext_tl(reg, reg, ot, true);
740 }
741 
742 static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
743 {
744     TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);
745 
746     tcg_gen_brcondi_tl(cond, tmp, 0, label1);
747 }
748 
749 static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
750 {
751     gen_op_j_ecx(s, TCG_COND_EQ, label1);
752 }
753 
754 static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
755 {
756     gen_op_j_ecx(s, TCG_COND_NE, label1);
757 }
758 
759 static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
760 {
761     switch (ot) {
762     case MO_8:
763         gen_helper_inb(v, tcg_env, n);
764         break;
765     case MO_16:
766         gen_helper_inw(v, tcg_env, n);
767         break;
768     case MO_32:
769         gen_helper_inl(v, tcg_env, n);
770         break;
771     default:
772         g_assert_not_reached();
773     }
774 }
775 
776 static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
777 {
778     switch (ot) {
779     case MO_8:
780         gen_helper_outb(tcg_env, v, n);
781         break;
782     case MO_16:
783         gen_helper_outw(tcg_env, v, n);
784         break;
785     case MO_32:
786         gen_helper_outl(tcg_env, v, n);
787         break;
788     default:
789         g_assert_not_reached();
790     }
791 }
792 
793 /*
794  * Validate that access to [port, port + 1<<ot) is allowed.
795  * Raise #GP, or VMM exit if not.
796  */
797 static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
798                          uint32_t svm_flags)
799 {
800 #ifdef CONFIG_USER_ONLY
801     /*
802      * We do not implement the ioperm(2) syscall, so the TSS check
803      * will always fail.
804      */
805     gen_exception_gpf(s);
806     return false;
807 #else
808     if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
809         gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
810     }
811     if (GUEST(s)) {
812         gen_update_cc_op(s);
813         gen_update_eip_cur(s);
814         if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
815             svm_flags |= SVM_IOIO_REP_MASK;
816         }
817         svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
818         gen_helper_svm_check_io(tcg_env, port,
819                                 tcg_constant_i32(svm_flags),
820                                 cur_insn_len_i32(s));
821     }
822     return true;
823 #endif
824 }
825 
826 static void gen_movs(DisasContext *s, MemOp ot)
827 {
828     TCGv dshift;
829 
830     gen_string_movl_A0_ESI(s);
831     gen_op_ld_v(s, ot, s->T0, s->A0);
832     gen_string_movl_A0_EDI(s);
833     gen_op_st_v(s, ot, s->T0, s->A0);
834 
835     dshift = gen_compute_Dshift(s, ot);
836     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
837     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
838 }
839 
840 static void gen_op_update1_cc(DisasContext *s)
841 {
842     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
843 }
844 
845 static void gen_op_update2_cc(DisasContext *s)
846 {
847     tcg_gen_mov_tl(cpu_cc_src, s->T1);
848     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
849 }
850 
851 static void gen_op_update3_cc(DisasContext *s, TCGv reg)
852 {
853     tcg_gen_mov_tl(cpu_cc_src2, reg);
854     tcg_gen_mov_tl(cpu_cc_src, s->T1);
855     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
856 }
857 
858 static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
859 {
860     tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
861 }
862 
863 static void gen_op_update_neg_cc(DisasContext *s)
864 {
865     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
866     tcg_gen_neg_tl(cpu_cc_src, s->T0);
867     tcg_gen_movi_tl(s->cc_srcT, 0);
868 }
869 
870 /* compute all eflags to reg */
871 static void gen_mov_eflags(DisasContext *s, TCGv reg)
872 {
873     TCGv dst, src1, src2;
874     TCGv_i32 cc_op;
875     int live, dead;
876 
877     if (s->cc_op == CC_OP_EFLAGS) {
878         tcg_gen_mov_tl(reg, cpu_cc_src);
879         return;
880     }
881     if (s->cc_op == CC_OP_CLR) {
882         tcg_gen_movi_tl(reg, CC_Z | CC_P);
883         return;
884     }
885 
886     dst = cpu_cc_dst;
887     src1 = cpu_cc_src;
888     src2 = cpu_cc_src2;
889 
890     /* Take care to not read values that are not live.  */
891     live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
892     dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
893     if (dead) {
894         TCGv zero = tcg_constant_tl(0);
895         if (dead & USES_CC_DST) {
896             dst = zero;
897         }
898         if (dead & USES_CC_SRC) {
899             src1 = zero;
900         }
901         if (dead & USES_CC_SRC2) {
902             src2 = zero;
903         }
904     }
905 
906     if (s->cc_op != CC_OP_DYNAMIC) {
907         cc_op = tcg_constant_i32(s->cc_op);
908     } else {
909         cc_op = cpu_cc_op;
910     }
911     gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
912 }
913 
914 /* compute all eflags to cc_src */
915 static void gen_compute_eflags(DisasContext *s)
916 {
917     gen_mov_eflags(s, cpu_cc_src);
918     set_cc_op(s, CC_OP_EFLAGS);
919 }
920 
921 typedef struct CCPrepare {
922     TCGCond cond;
923     TCGv reg;
924     TCGv reg2;
925     target_ulong imm;
926     target_ulong mask;
927     bool use_reg2;
928     bool no_setcond;
929 } CCPrepare;
930 
931 /* compute eflags.C to reg */
932 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
933 {
934     TCGv t0, t1;
935     int size, shift;
936 
937     switch (s->cc_op) {
938     case CC_OP_SUBB ... CC_OP_SUBQ:
939         /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
940         size = s->cc_op - CC_OP_SUBB;
941         t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
942         /* If no temporary was used, be careful not to alias t1 and t0.  */
943         t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
944         tcg_gen_mov_tl(t0, s->cc_srcT);
945         gen_extu(size, t0);
946         goto add_sub;
947 
948     case CC_OP_ADDB ... CC_OP_ADDQ:
949         /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
950         size = s->cc_op - CC_OP_ADDB;
951         t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
952         t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
953     add_sub:
954         return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
955                              .reg2 = t1, .mask = -1, .use_reg2 = true };
956 
957     case CC_OP_LOGICB ... CC_OP_LOGICQ:
958     case CC_OP_CLR:
959     case CC_OP_POPCNT:
960         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
961 
962     case CC_OP_INCB ... CC_OP_INCQ:
963     case CC_OP_DECB ... CC_OP_DECQ:
964         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
965                              .mask = -1, .no_setcond = true };
966 
967     case CC_OP_SHLB ... CC_OP_SHLQ:
968         /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
969         size = s->cc_op - CC_OP_SHLB;
970         shift = (8 << size) - 1;
971         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
972                              .mask = (target_ulong)1 << shift };
973 
974     case CC_OP_MULB ... CC_OP_MULQ:
975         return (CCPrepare) { .cond = TCG_COND_NE,
976                              .reg = cpu_cc_src, .mask = -1 };
977 
978     case CC_OP_BMILGB ... CC_OP_BMILGQ:
979         size = s->cc_op - CC_OP_BMILGB;
980         t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
981         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
982 
983     case CC_OP_ADCX:
984     case CC_OP_ADCOX:
985         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
986                              .mask = -1, .no_setcond = true };
987 
988     case CC_OP_EFLAGS:
989     case CC_OP_SARB ... CC_OP_SARQ:
990         /* CC_SRC & 1 */
991         return (CCPrepare) { .cond = TCG_COND_NE,
992                              .reg = cpu_cc_src, .mask = CC_C };
993 
994     default:
995        /* The need to compute only C from CC_OP_DYNAMIC is important
996           in efficiently implementing e.g. INC at the start of a TB.  */
997        gen_update_cc_op(s);
998        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
999                                cpu_cc_src2, cpu_cc_op);
1000        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1001                             .mask = -1, .no_setcond = true };
1002     }
1003 }
1004 
1005 /* compute eflags.P to reg */
1006 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
1007 {
1008     gen_compute_eflags(s);
1009     return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1010                          .mask = CC_P };
1011 }
1012 
1013 /* compute eflags.S to reg */
1014 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
1015 {
1016     switch (s->cc_op) {
1017     case CC_OP_DYNAMIC:
1018         gen_compute_eflags(s);
1019         /* FALLTHRU */
1020     case CC_OP_EFLAGS:
1021     case CC_OP_ADCX:
1022     case CC_OP_ADOX:
1023     case CC_OP_ADCOX:
1024         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1025                              .mask = CC_S };
1026     case CC_OP_CLR:
1027     case CC_OP_POPCNT:
1028         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1029     default:
1030         {
1031             MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1032             TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
1033             return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
1034         }
1035     }
1036 }
1037 
1038 /* compute eflags.O to reg */
1039 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
1040 {
1041     switch (s->cc_op) {
1042     case CC_OP_ADOX:
1043     case CC_OP_ADCOX:
1044         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
1045                              .mask = -1, .no_setcond = true };
1046     case CC_OP_CLR:
1047     case CC_OP_POPCNT:
1048         return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
1049     case CC_OP_MULB ... CC_OP_MULQ:
1050         return (CCPrepare) { .cond = TCG_COND_NE,
1051                              .reg = cpu_cc_src, .mask = -1 };
1052     default:
1053         gen_compute_eflags(s);
1054         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1055                              .mask = CC_O };
1056     }
1057 }
1058 
1059 /* compute eflags.Z to reg */
1060 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
1061 {
1062     switch (s->cc_op) {
1063     case CC_OP_DYNAMIC:
1064         gen_compute_eflags(s);
1065         /* FALLTHRU */
1066     case CC_OP_EFLAGS:
1067     case CC_OP_ADCX:
1068     case CC_OP_ADOX:
1069     case CC_OP_ADCOX:
1070         return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1071                              .mask = CC_Z };
1072     case CC_OP_CLR:
1073         return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
1074     case CC_OP_POPCNT:
1075         return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
1076                              .mask = -1 };
1077     default:
1078         {
1079             MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
1080             TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
1081             return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
1082         }
1083     }
1084 }
1085 
1086 /* perform a conditional store into register 'reg' according to jump opcode
1087    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1088 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
1089 {
1090     int inv, jcc_op, cond;
1091     MemOp size;
1092     CCPrepare cc;
1093     TCGv t0;
1094 
1095     inv = b & 1;
1096     jcc_op = (b >> 1) & 7;
1097 
1098     switch (s->cc_op) {
1099     case CC_OP_SUBB ... CC_OP_SUBQ:
1100         /* We optimize relational operators for the cmp/jcc case.  */
1101         size = s->cc_op - CC_OP_SUBB;
1102         switch (jcc_op) {
1103         case JCC_BE:
1104             tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1105             gen_extu(size, s->tmp4);
1106             t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
1107             cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
1108                                .reg2 = t0, .mask = -1, .use_reg2 = true };
1109             break;
1110 
1111         case JCC_L:
1112             cond = TCG_COND_LT;
1113             goto fast_jcc_l;
1114         case JCC_LE:
1115             cond = TCG_COND_LE;
1116         fast_jcc_l:
1117             tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
1118             gen_exts(size, s->tmp4);
1119             t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
1120             cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
1121                                .reg2 = t0, .mask = -1, .use_reg2 = true };
1122             break;
1123 
1124         default:
1125             goto slow_jcc;
1126         }
1127         break;
1128 
1129     default:
1130     slow_jcc:
1131         /* This actually generates good code for JC, JZ and JS.  */
1132         switch (jcc_op) {
1133         case JCC_O:
1134             cc = gen_prepare_eflags_o(s, reg);
1135             break;
1136         case JCC_B:
1137             cc = gen_prepare_eflags_c(s, reg);
1138             break;
1139         case JCC_Z:
1140             cc = gen_prepare_eflags_z(s, reg);
1141             break;
1142         case JCC_BE:
1143             gen_compute_eflags(s);
1144             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1145                                .mask = CC_Z | CC_C };
1146             break;
1147         case JCC_S:
1148             cc = gen_prepare_eflags_s(s, reg);
1149             break;
1150         case JCC_P:
1151             cc = gen_prepare_eflags_p(s, reg);
1152             break;
1153         case JCC_L:
1154             gen_compute_eflags(s);
1155             if (reg == cpu_cc_src) {
1156                 reg = s->tmp0;
1157             }
1158             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1159             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1160                                .mask = CC_O };
1161             break;
1162         default:
1163         case JCC_LE:
1164             gen_compute_eflags(s);
1165             if (reg == cpu_cc_src) {
1166                 reg = s->tmp0;
1167             }
1168             tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
1169             cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1170                                .mask = CC_O | CC_Z };
1171             break;
1172         }
1173         break;
1174     }
1175 
1176     if (inv) {
1177         cc.cond = tcg_invert_cond(cc.cond);
1178     }
1179     return cc;
1180 }
1181 
1182 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1183 {
1184     CCPrepare cc = gen_prepare_cc(s, b, reg);
1185 
1186     if (cc.no_setcond) {
1187         if (cc.cond == TCG_COND_EQ) {
1188             tcg_gen_xori_tl(reg, cc.reg, 1);
1189         } else {
1190             tcg_gen_mov_tl(reg, cc.reg);
1191         }
1192         return;
1193     }
1194 
1195     if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1196         cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1197         tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1198         tcg_gen_andi_tl(reg, reg, 1);
1199         return;
1200     }
1201     if (cc.mask != -1) {
1202         tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1203         cc.reg = reg;
1204     }
1205     if (cc.use_reg2) {
1206         tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1207     } else {
1208         tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1209     }
1210 }
1211 
1212 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1213 {
1214     gen_setcc1(s, JCC_B << 1, reg);
1215 }
1216 
1217 /* generate a conditional jump to label 'l1' according to jump opcode
1218    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1219 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1220 {
1221     CCPrepare cc = gen_prepare_cc(s, b, s->T0);
1222 
1223     if (cc.mask != -1) {
1224         tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1225         cc.reg = s->T0;
1226     }
1227     if (cc.use_reg2) {
1228         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1229     } else {
1230         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1231     }
1232 }
1233 
1234 /* Generate a conditional jump to label 'l1' according to jump opcode
1235    value 'b'. In the fast case, T0 is guaranteed not to be used.
1236    A translation block must end soon.  */
1237 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1238 {
1239     CCPrepare cc = gen_prepare_cc(s, b, s->T0);
1240 
1241     gen_update_cc_op(s);
1242     if (cc.mask != -1) {
1243         tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
1244         cc.reg = s->T0;
1245     }
1246     set_cc_op(s, CC_OP_DYNAMIC);
1247     if (cc.use_reg2) {
1248         tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1249     } else {
1250         tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1251     }
1252 }
1253 
1254 /* XXX: does not work with gdbstub "ice" single step - not a
1255    serious problem */
1256 static TCGLabel *gen_jz_ecx_string(DisasContext *s)
1257 {
1258     TCGLabel *l1 = gen_new_label();
1259     TCGLabel *l2 = gen_new_label();
1260     gen_op_jnz_ecx(s, l1);
1261     gen_set_label(l2);
1262     gen_jmp_rel_csize(s, 0, 1);
1263     gen_set_label(l1);
1264     return l2;
1265 }
1266 
1267 static void gen_stos(DisasContext *s, MemOp ot)
1268 {
1269     gen_string_movl_A0_EDI(s);
1270     gen_op_st_v(s, ot, s->T0, s->A0);
1271     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1272 }
1273 
1274 static void gen_lods(DisasContext *s, MemOp ot)
1275 {
1276     gen_string_movl_A0_ESI(s);
1277     gen_op_ld_v(s, ot, s->T0, s->A0);
1278     gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
1279     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1280 }
1281 
1282 static void gen_scas(DisasContext *s, MemOp ot)
1283 {
1284     gen_string_movl_A0_EDI(s);
1285     gen_op_ld_v(s, ot, s->T1, s->A0);
1286     tcg_gen_mov_tl(cpu_cc_src, s->T1);
1287     tcg_gen_mov_tl(s->cc_srcT, s->T0);
1288     tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
1289     set_cc_op(s, CC_OP_SUBB + ot);
1290 
1291     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1292 }
1293 
1294 static void gen_cmps(DisasContext *s, MemOp ot)
1295 {
1296     TCGv dshift;
1297 
1298     gen_string_movl_A0_EDI(s);
1299     gen_op_ld_v(s, ot, s->T1, s->A0);
1300     gen_string_movl_A0_ESI(s);
1301     gen_op(s, OP_CMPL, ot, OR_TMP0);
1302 
1303     dshift = gen_compute_Dshift(s, ot);
1304     gen_op_add_reg(s, s->aflag, R_ESI, dshift);
1305     gen_op_add_reg(s, s->aflag, R_EDI, dshift);
1306 }
1307 
1308 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1309 {
1310     if (s->flags & HF_IOBPT_MASK) {
1311 #ifdef CONFIG_USER_ONLY
1312         /* user-mode cpu should not be in IOBPT mode */
1313         g_assert_not_reached();
1314 #else
1315         TCGv_i32 t_size = tcg_constant_i32(1 << ot);
1316         TCGv t_next = eip_next_tl(s);
1317         gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
1318 #endif /* CONFIG_USER_ONLY */
1319     }
1320 }
1321 
1322 static void gen_ins(DisasContext *s, MemOp ot)
1323 {
1324     gen_string_movl_A0_EDI(s);
1325     /* Note: we must do this dummy write first to be restartable in
1326        case of page fault. */
1327     tcg_gen_movi_tl(s->T0, 0);
1328     gen_op_st_v(s, ot, s->T0, s->A0);
1329     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1330     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1331     gen_helper_in_func(ot, s->T0, s->tmp2_i32);
1332     gen_op_st_v(s, ot, s->T0, s->A0);
1333     gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
1334     gen_bpt_io(s, s->tmp2_i32, ot);
1335 }
1336 
1337 static void gen_outs(DisasContext *s, MemOp ot)
1338 {
1339     gen_string_movl_A0_ESI(s);
1340     gen_op_ld_v(s, ot, s->T0, s->A0);
1341 
1342     tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
1343     tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
1344     tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
1345     gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
1346     gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
1347     gen_bpt_io(s, s->tmp2_i32, ot);
1348 }
1349 
1350 /* Generate jumps to current or next instruction */
1351 static void gen_repz(DisasContext *s, MemOp ot,
1352                      void (*fn)(DisasContext *s, MemOp ot))
1353 {
1354     TCGLabel *l2;
1355     gen_update_cc_op(s);
1356     l2 = gen_jz_ecx_string(s);
1357     fn(s, ot);
1358     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1359     /*
1360      * A loop would cause two single step exceptions if ECX = 1
1361      * before rep string_insn
1362      */
1363     if (s->repz_opt) {
1364         gen_op_jz_ecx(s, l2);
1365     }
1366     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1367 }
1368 
1369 #define GEN_REPZ(op) \
1370     static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
1371     { gen_repz(s, ot, gen_##op); }
1372 
1373 static void gen_repz2(DisasContext *s, MemOp ot, int nz,
1374                       void (*fn)(DisasContext *s, MemOp ot))
1375 {
1376     TCGLabel *l2;
1377     gen_update_cc_op(s);
1378     l2 = gen_jz_ecx_string(s);
1379     fn(s, ot);
1380     gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
1381     gen_update_cc_op(s);
1382     gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
1383     if (s->repz_opt) {
1384         gen_op_jz_ecx(s, l2);
1385     }
1386     gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
1387 }
1388 
1389 #define GEN_REPZ2(op) \
1390     static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
1391     { gen_repz2(s, ot, nz, gen_##op); }
1392 
1393 GEN_REPZ(movs)
1394 GEN_REPZ(stos)
1395 GEN_REPZ(lods)
1396 GEN_REPZ(ins)
1397 GEN_REPZ(outs)
1398 GEN_REPZ2(scas)
1399 GEN_REPZ2(cmps)
1400 
1401 static void gen_helper_fp_arith_ST0_FT0(int op)
1402 {
1403     switch (op) {
1404     case 0:
1405         gen_helper_fadd_ST0_FT0(tcg_env);
1406         break;
1407     case 1:
1408         gen_helper_fmul_ST0_FT0(tcg_env);
1409         break;
1410     case 2:
1411         gen_helper_fcom_ST0_FT0(tcg_env);
1412         break;
1413     case 3:
1414         gen_helper_fcom_ST0_FT0(tcg_env);
1415         break;
1416     case 4:
1417         gen_helper_fsub_ST0_FT0(tcg_env);
1418         break;
1419     case 5:
1420         gen_helper_fsubr_ST0_FT0(tcg_env);
1421         break;
1422     case 6:
1423         gen_helper_fdiv_ST0_FT0(tcg_env);
1424         break;
1425     case 7:
1426         gen_helper_fdivr_ST0_FT0(tcg_env);
1427         break;
1428     }
1429 }
1430 
1431 /* NOTE the exception in "r" op ordering */
1432 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1433 {
1434     TCGv_i32 tmp = tcg_constant_i32(opreg);
1435     switch (op) {
1436     case 0:
1437         gen_helper_fadd_STN_ST0(tcg_env, tmp);
1438         break;
1439     case 1:
1440         gen_helper_fmul_STN_ST0(tcg_env, tmp);
1441         break;
1442     case 4:
1443         gen_helper_fsubr_STN_ST0(tcg_env, tmp);
1444         break;
1445     case 5:
1446         gen_helper_fsub_STN_ST0(tcg_env, tmp);
1447         break;
1448     case 6:
1449         gen_helper_fdivr_STN_ST0(tcg_env, tmp);
1450         break;
1451     case 7:
1452         gen_helper_fdiv_STN_ST0(tcg_env, tmp);
1453         break;
1454     }
1455 }
1456 
1457 static void gen_exception(DisasContext *s, int trapno)
1458 {
1459     gen_update_cc_op(s);
1460     gen_update_eip_cur(s);
1461     gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
1462     s->base.is_jmp = DISAS_NORETURN;
1463 }
1464 
1465 /* Generate #UD for the current instruction.  The assumption here is that
1466    the instruction is known, but it isn't allowed in the current cpu mode.  */
1467 static void gen_illegal_opcode(DisasContext *s)
1468 {
1469     gen_exception(s, EXCP06_ILLOP);
1470 }
1471 
1472 /* Generate #GP for the current instruction. */
1473 static void gen_exception_gpf(DisasContext *s)
1474 {
1475     gen_exception(s, EXCP0D_GPF);
1476 }
1477 
1478 /* Check for cpl == 0; if not, raise #GP and return false. */
1479 static bool check_cpl0(DisasContext *s)
1480 {
1481     if (CPL(s) == 0) {
1482         return true;
1483     }
1484     gen_exception_gpf(s);
1485     return false;
1486 }
1487 
1488 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1489 static bool check_vm86_iopl(DisasContext *s)
1490 {
1491     if (!VM86(s) || IOPL(s) == 3) {
1492         return true;
1493     }
1494     gen_exception_gpf(s);
1495     return false;
1496 }
1497 
1498 /* Check for iopl allowing access; if not, raise #GP and return false. */
1499 static bool check_iopl(DisasContext *s)
1500 {
1501     if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
1502         return true;
1503     }
1504     gen_exception_gpf(s);
1505     return false;
1506 }
1507 
1508 /* if d == OR_TMP0, it means memory operand (address in A0) */
1509 static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
1510 {
1511     /* Invalid lock prefix when destination is not memory or OP_CMPL. */
1512     if ((d != OR_TMP0 || op == OP_CMPL) && s1->prefix & PREFIX_LOCK) {
1513         gen_illegal_opcode(s1);
1514         return;
1515     }
1516 
1517     if (d != OR_TMP0) {
1518         gen_op_mov_v_reg(s1, ot, s1->T0, d);
1519     } else if (!(s1->prefix & PREFIX_LOCK)) {
1520         gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1521     }
1522     switch(op) {
1523     case OP_ADCL:
1524         gen_compute_eflags_c(s1, s1->tmp4);
1525         if (s1->prefix & PREFIX_LOCK) {
1526             tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
1527             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1528                                         s1->mem_index, ot | MO_LE);
1529         } else {
1530             tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
1531             tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
1532             gen_op_st_rm_T0_A0(s1, ot, d);
1533         }
1534         gen_op_update3_cc(s1, s1->tmp4);
1535         set_cc_op(s1, CC_OP_ADCB + ot);
1536         break;
1537     case OP_SBBL:
1538         gen_compute_eflags_c(s1, s1->tmp4);
1539         if (s1->prefix & PREFIX_LOCK) {
1540             tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
1541             tcg_gen_neg_tl(s1->T0, s1->T0);
1542             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1543                                         s1->mem_index, ot | MO_LE);
1544         } else {
1545             tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1546             tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
1547             gen_op_st_rm_T0_A0(s1, ot, d);
1548         }
1549         gen_op_update3_cc(s1, s1->tmp4);
1550         set_cc_op(s1, CC_OP_SBBB + ot);
1551         break;
1552     case OP_ADDL:
1553         if (s1->prefix & PREFIX_LOCK) {
1554             tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
1555                                         s1->mem_index, ot | MO_LE);
1556         } else {
1557             tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
1558             gen_op_st_rm_T0_A0(s1, ot, d);
1559         }
1560         gen_op_update2_cc(s1);
1561         set_cc_op(s1, CC_OP_ADDB + ot);
1562         break;
1563     case OP_SUBL:
1564         if (s1->prefix & PREFIX_LOCK) {
1565             tcg_gen_neg_tl(s1->T0, s1->T1);
1566             tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
1567                                         s1->mem_index, ot | MO_LE);
1568             tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
1569         } else {
1570             tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1571             tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
1572             gen_op_st_rm_T0_A0(s1, ot, d);
1573         }
1574         gen_op_update2_cc(s1);
1575         set_cc_op(s1, CC_OP_SUBB + ot);
1576         break;
1577     default:
1578     case OP_ANDL:
1579         if (s1->prefix & PREFIX_LOCK) {
1580             tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
1581                                         s1->mem_index, ot | MO_LE);
1582         } else {
1583             tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
1584             gen_op_st_rm_T0_A0(s1, ot, d);
1585         }
1586         gen_op_update1_cc(s1);
1587         set_cc_op(s1, CC_OP_LOGICB + ot);
1588         break;
1589     case OP_ORL:
1590         if (s1->prefix & PREFIX_LOCK) {
1591             tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
1592                                        s1->mem_index, ot | MO_LE);
1593         } else {
1594             tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
1595             gen_op_st_rm_T0_A0(s1, ot, d);
1596         }
1597         gen_op_update1_cc(s1);
1598         set_cc_op(s1, CC_OP_LOGICB + ot);
1599         break;
1600     case OP_XORL:
1601         if (s1->prefix & PREFIX_LOCK) {
1602             tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
1603                                         s1->mem_index, ot | MO_LE);
1604         } else {
1605             tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
1606             gen_op_st_rm_T0_A0(s1, ot, d);
1607         }
1608         gen_op_update1_cc(s1);
1609         set_cc_op(s1, CC_OP_LOGICB + ot);
1610         break;
1611     case OP_CMPL:
1612         tcg_gen_mov_tl(cpu_cc_src, s1->T1);
1613         tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
1614         tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
1615         set_cc_op(s1, CC_OP_SUBB + ot);
1616         break;
1617     }
1618 }
1619 
1620 /* if d == OR_TMP0, it means memory operand (address in A0) */
1621 static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
1622 {
1623     if (s1->prefix & PREFIX_LOCK) {
1624         if (d != OR_TMP0) {
1625             /* Lock prefix when destination is not memory */
1626             gen_illegal_opcode(s1);
1627             return;
1628         }
1629         tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
1630         tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
1631                                     s1->mem_index, ot | MO_LE);
1632     } else {
1633         if (d != OR_TMP0) {
1634             gen_op_mov_v_reg(s1, ot, s1->T0, d);
1635         } else {
1636             gen_op_ld_v(s1, ot, s1->T0, s1->A0);
1637         }
1638         tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
1639         gen_op_st_rm_T0_A0(s1, ot, d);
1640     }
1641 
1642     gen_compute_eflags_c(s1, cpu_cc_src);
1643     tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
1644     set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
1645 }
1646 
1647 static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
1648                             TCGv shm1, TCGv count, bool is_right)
1649 {
1650     TCGv_i32 z32, s32, oldop;
1651     TCGv z_tl;
1652 
1653     /* Store the results into the CC variables.  If we know that the
1654        variable must be dead, store unconditionally.  Otherwise we'll
1655        need to not disrupt the current contents.  */
1656     z_tl = tcg_constant_tl(0);
1657     if (cc_op_live[s->cc_op] & USES_CC_DST) {
1658         tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1659                            result, cpu_cc_dst);
1660     } else {
1661         tcg_gen_mov_tl(cpu_cc_dst, result);
1662     }
1663     if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1664         tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1665                            shm1, cpu_cc_src);
1666     } else {
1667         tcg_gen_mov_tl(cpu_cc_src, shm1);
1668     }
1669 
1670     /* Get the two potential CC_OP values into temporaries.  */
1671     tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1672     if (s->cc_op == CC_OP_DYNAMIC) {
1673         oldop = cpu_cc_op;
1674     } else {
1675         tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
1676         oldop = s->tmp3_i32;
1677     }
1678 
1679     /* Conditionally store the CC_OP value.  */
1680     z32 = tcg_constant_i32(0);
1681     s32 = tcg_temp_new_i32();
1682     tcg_gen_trunc_tl_i32(s32, count);
1683     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);
1684 
1685     /* The CC_OP value is no longer predictable.  */
1686     set_cc_op(s, CC_OP_DYNAMIC);
1687 }
1688 
1689 static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
1690                             int is_right, int is_arith)
1691 {
1692     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1693 
1694     /* load */
1695     if (op1 == OR_TMP0) {
1696         gen_op_ld_v(s, ot, s->T0, s->A0);
1697     } else {
1698         gen_op_mov_v_reg(s, ot, s->T0, op1);
1699     }
1700 
1701     tcg_gen_andi_tl(s->T1, s->T1, mask);
1702     tcg_gen_subi_tl(s->tmp0, s->T1, 1);
1703 
1704     if (is_right) {
1705         if (is_arith) {
1706             gen_exts(ot, s->T0);
1707             tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
1708             tcg_gen_sar_tl(s->T0, s->T0, s->T1);
1709         } else {
1710             gen_extu(ot, s->T0);
1711             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
1712             tcg_gen_shr_tl(s->T0, s->T0, s->T1);
1713         }
1714     } else {
1715         tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
1716         tcg_gen_shl_tl(s->T0, s->T0, s->T1);
1717     }
1718 
1719     /* store */
1720     gen_op_st_rm_T0_A0(s, ot, op1);
1721 
1722     gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
1723 }
1724 
1725 static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1726                             int is_right, int is_arith)
1727 {
1728     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1729 
1730     /* load */
1731     if (op1 == OR_TMP0)
1732         gen_op_ld_v(s, ot, s->T0, s->A0);
1733     else
1734         gen_op_mov_v_reg(s, ot, s->T0, op1);
1735 
1736     op2 &= mask;
1737     if (op2 != 0) {
1738         if (is_right) {
1739             if (is_arith) {
1740                 gen_exts(ot, s->T0);
1741                 tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
1742                 tcg_gen_sari_tl(s->T0, s->T0, op2);
1743             } else {
1744                 gen_extu(ot, s->T0);
1745                 tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
1746                 tcg_gen_shri_tl(s->T0, s->T0, op2);
1747             }
1748         } else {
1749             tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
1750             tcg_gen_shli_tl(s->T0, s->T0, op2);
1751         }
1752     }
1753 
1754     /* store */
1755     gen_op_st_rm_T0_A0(s, ot, op1);
1756 
1757     /* update eflags if non zero shift */
1758     if (op2 != 0) {
1759         tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
1760         tcg_gen_mov_tl(cpu_cc_dst, s->T0);
1761         set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1762     }
1763 }
1764 
1765 static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
1766 {
1767     target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1768     TCGv_i32 t0, t1;
1769 
1770     /* load */
1771     if (op1 == OR_TMP0) {
1772         gen_op_ld_v(s, ot, s->T0, s->A0);
1773     } else {
1774         gen_op_mov_v_reg(s, ot, s->T0, op1);
1775     }
1776 
1777     tcg_gen_andi_tl(s->T1, s->T1, mask);
1778 
1779     switch (ot) {
1780     case MO_8:
1781         /* Replicate the 8-bit input so that a 32-bit rotate works.  */
1782         tcg_gen_ext8u_tl(s->T0, s->T0);
1783         tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
1784         goto do_long;
1785     case MO_16:
1786         /* Replicate the 16-bit input so that a 32-bit rotate works.  */
1787         tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
1788         goto do_long;
1789     do_long:
1790 #ifdef TARGET_X86_64
1791     case MO_32:
1792         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1793         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
1794         if (is_right) {
1795             tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1796         } else {
1797             tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
1798         }
1799         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1800         break;
1801 #endif
1802     default:
1803         if (is_right) {
1804             tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
1805         } else {
1806             tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
1807         }
1808         break;
1809     }
1810 
1811     /* store */
1812     gen_op_st_rm_T0_A0(s, ot, op1);
1813 
1814     /* We'll need the flags computed into CC_SRC.  */
1815     gen_compute_eflags(s);
1816 
1817     /* The value that was "rotated out" is now present at the other end
1818        of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1819        since we've computed the flags into CC_SRC, these variables are
1820        currently dead.  */
1821     if (is_right) {
1822         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1823         tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1824         tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1825     } else {
1826         tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1827         tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1828     }
1829     tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1830     tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1831 
1832     /* Now conditionally store the new CC_OP value.  If the shift count
1833        is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1834        Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1835        exactly as we computed above.  */
1836     t0 = tcg_constant_i32(0);
1837     t1 = tcg_temp_new_i32();
1838     tcg_gen_trunc_tl_i32(t1, s->T1);
1839     tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
1840     tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
1841     tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1842                         s->tmp2_i32, s->tmp3_i32);
1843 
1844     /* The CC_OP value is no longer predictable.  */
1845     set_cc_op(s, CC_OP_DYNAMIC);
1846 }
1847 
1848 static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
1849                           int is_right)
1850 {
1851     int mask = (ot == MO_64 ? 0x3f : 0x1f);
1852     int shift;
1853 
1854     /* load */
1855     if (op1 == OR_TMP0) {
1856         gen_op_ld_v(s, ot, s->T0, s->A0);
1857     } else {
1858         gen_op_mov_v_reg(s, ot, s->T0, op1);
1859     }
1860 
1861     op2 &= mask;
1862     if (op2 != 0) {
1863         switch (ot) {
1864 #ifdef TARGET_X86_64
1865         case MO_32:
1866             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
1867             if (is_right) {
1868                 tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
1869             } else {
1870                 tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
1871             }
1872             tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
1873             break;
1874 #endif
1875         default:
1876             if (is_right) {
1877                 tcg_gen_rotri_tl(s->T0, s->T0, op2);
1878             } else {
1879                 tcg_gen_rotli_tl(s->T0, s->T0, op2);
1880             }
1881             break;
1882         case MO_8:
1883             mask = 7;
1884             goto do_shifts;
1885         case MO_16:
1886             mask = 15;
1887         do_shifts:
1888             shift = op2 & mask;
1889             if (is_right) {
1890                 shift = mask + 1 - shift;
1891             }
1892             gen_extu(ot, s->T0);
1893             tcg_gen_shli_tl(s->tmp0, s->T0, shift);
1894             tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
1895             tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
1896             break;
1897         }
1898     }
1899 
1900     /* store */
1901     gen_op_st_rm_T0_A0(s, ot, op1);
1902 
1903     if (op2 != 0) {
1904         /* Compute the flags into CC_SRC.  */
1905         gen_compute_eflags(s);
1906 
1907         /* The value that was "rotated out" is now present at the other end
1908            of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
1909            since we've computed the flags into CC_SRC, these variables are
1910            currently dead.  */
1911         if (is_right) {
1912             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
1913             tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
1914             tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1915         } else {
1916             tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
1917             tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
1918         }
1919         tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1920         tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1921         set_cc_op(s, CC_OP_ADCOX);
1922     }
1923 }
1924 
1925 /* XXX: add faster immediate = 1 case */
1926 static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
1927                            int is_right)
1928 {
1929     gen_compute_eflags(s);
1930     assert(s->cc_op == CC_OP_EFLAGS);
1931 
1932     /* load */
1933     if (op1 == OR_TMP0)
1934         gen_op_ld_v(s, ot, s->T0, s->A0);
1935     else
1936         gen_op_mov_v_reg(s, ot, s->T0, op1);
1937 
1938     if (is_right) {
1939         switch (ot) {
1940         case MO_8:
1941             gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
1942             break;
1943         case MO_16:
1944             gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
1945             break;
1946         case MO_32:
1947             gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
1948             break;
1949 #ifdef TARGET_X86_64
1950         case MO_64:
1951             gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
1952             break;
1953 #endif
1954         default:
1955             g_assert_not_reached();
1956         }
1957     } else {
1958         switch (ot) {
1959         case MO_8:
1960             gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
1961             break;
1962         case MO_16:
1963             gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
1964             break;
1965         case MO_32:
1966             gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
1967             break;
1968 #ifdef TARGET_X86_64
1969         case MO_64:
1970             gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
1971             break;
1972 #endif
1973         default:
1974             g_assert_not_reached();
1975         }
1976     }
1977     /* store */
1978     gen_op_st_rm_T0_A0(s, ot, op1);
1979 }
1980 
1981 /* XXX: add faster immediate case */
1982 static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
1983                              bool is_right, TCGv count_in)
1984 {
1985     target_ulong mask = (ot == MO_64 ? 63 : 31);
1986     TCGv count;
1987 
1988     /* load */
1989     if (op1 == OR_TMP0) {
1990         gen_op_ld_v(s, ot, s->T0, s->A0);
1991     } else {
1992         gen_op_mov_v_reg(s, ot, s->T0, op1);
1993     }
1994 
1995     count = tcg_temp_new();
1996     tcg_gen_andi_tl(count, count_in, mask);
1997 
1998     switch (ot) {
1999     case MO_16:
2000         /* Note: we implement the Intel behaviour for shift count > 16.
2001            This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
2002            portion by constructing it as a 32-bit value.  */
2003         if (is_right) {
2004             tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
2005             tcg_gen_mov_tl(s->T1, s->T0);
2006             tcg_gen_mov_tl(s->T0, s->tmp0);
2007         } else {
2008             tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
2009         }
2010         /*
2011          * If TARGET_X86_64 defined then fall through into MO_32 case,
2012          * otherwise fall through default case.
2013          */
2014     case MO_32:
2015 #ifdef TARGET_X86_64
2016         /* Concatenate the two 32-bit values and use a 64-bit shift.  */
2017         tcg_gen_subi_tl(s->tmp0, count, 1);
2018         if (is_right) {
2019             tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
2020             tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
2021             tcg_gen_shr_i64(s->T0, s->T0, count);
2022         } else {
2023             tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
2024             tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
2025             tcg_gen_shl_i64(s->T0, s->T0, count);
2026             tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
2027             tcg_gen_shri_i64(s->T0, s->T0, 32);
2028         }
2029         break;
2030 #endif
2031     default:
2032         tcg_gen_subi_tl(s->tmp0, count, 1);
2033         if (is_right) {
2034             tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
2035 
2036             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2037             tcg_gen_shr_tl(s->T0, s->T0, count);
2038             tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
2039         } else {
2040             tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
2041             if (ot == MO_16) {
2042                 /* Only needed if count > 16, for Intel behaviour.  */
2043                 tcg_gen_subfi_tl(s->tmp4, 33, count);
2044                 tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
2045                 tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
2046             }
2047 
2048             tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
2049             tcg_gen_shl_tl(s->T0, s->T0, count);
2050             tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
2051         }
2052         tcg_gen_movi_tl(s->tmp4, 0);
2053         tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
2054                            s->tmp4, s->T1);
2055         tcg_gen_or_tl(s->T0, s->T0, s->T1);
2056         break;
2057     }
2058 
2059     /* store */
2060     gen_op_st_rm_T0_A0(s, ot, op1);
2061 
2062     gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
2063 }
2064 
2065 static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
2066 {
2067     if (s != OR_TMP1)
2068         gen_op_mov_v_reg(s1, ot, s1->T1, s);
2069     switch(op) {
2070     case OP_ROL:
2071         gen_rot_rm_T1(s1, ot, d, 0);
2072         break;
2073     case OP_ROR:
2074         gen_rot_rm_T1(s1, ot, d, 1);
2075         break;
2076     case OP_SHL:
2077     case OP_SHL1:
2078         gen_shift_rm_T1(s1, ot, d, 0, 0);
2079         break;
2080     case OP_SHR:
2081         gen_shift_rm_T1(s1, ot, d, 1, 0);
2082         break;
2083     case OP_SAR:
2084         gen_shift_rm_T1(s1, ot, d, 1, 1);
2085         break;
2086     case OP_RCL:
2087         gen_rotc_rm_T1(s1, ot, d, 0);
2088         break;
2089     case OP_RCR:
2090         gen_rotc_rm_T1(s1, ot, d, 1);
2091         break;
2092     }
2093 }
2094 
2095 static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
2096 {
2097     switch(op) {
2098     case OP_ROL:
2099         gen_rot_rm_im(s1, ot, d, c, 0);
2100         break;
2101     case OP_ROR:
2102         gen_rot_rm_im(s1, ot, d, c, 1);
2103         break;
2104     case OP_SHL:
2105     case OP_SHL1:
2106         gen_shift_rm_im(s1, ot, d, c, 0, 0);
2107         break;
2108     case OP_SHR:
2109         gen_shift_rm_im(s1, ot, d, c, 1, 0);
2110         break;
2111     case OP_SAR:
2112         gen_shift_rm_im(s1, ot, d, c, 1, 1);
2113         break;
2114     default:
2115         /* currently not optimized */
2116         tcg_gen_movi_tl(s1->T1, c);
2117         gen_shift(s1, op, ot, d, OR_TMP1);
2118         break;
2119     }
2120 }
2121 
2122 #define X86_MAX_INSN_LENGTH 15
2123 
2124 static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
2125 {
2126     uint64_t pc = s->pc;
2127 
2128     /* This is a subsequent insn that crosses a page boundary.  */
2129     if (s->base.num_insns > 1 &&
2130         !is_same_page(&s->base, s->pc + num_bytes - 1)) {
2131         siglongjmp(s->jmpbuf, 2);
2132     }
2133 
2134     s->pc += num_bytes;
2135     if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
2136         /* If the instruction's 16th byte is on a different page than the 1st, a
2137          * page fault on the second page wins over the general protection fault
2138          * caused by the instruction being too long.
2139          * This can happen even if the operand is only one byte long!
2140          */
2141         if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
2142             volatile uint8_t unused =
2143                 cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
2144             (void) unused;
2145         }
2146         siglongjmp(s->jmpbuf, 1);
2147     }
2148 
2149     return pc;
2150 }
2151 
2152 static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
2153 {
2154     return translator_ldub(env, &s->base, advance_pc(env, s, 1));
2155 }
2156 
2157 static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
2158 {
2159     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2160 }
2161 
2162 static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
2163 {
2164     return translator_lduw(env, &s->base, advance_pc(env, s, 2));
2165 }
2166 
2167 static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
2168 {
2169     return translator_ldl(env, &s->base, advance_pc(env, s, 4));
2170 }
2171 
2172 #ifdef TARGET_X86_64
2173 static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
2174 {
2175     return translator_ldq(env, &s->base, advance_pc(env, s, 8));
2176 }
2177 #endif
2178 
2179 /* Decompose an address.  */
2180 
2181 typedef struct AddressParts {
2182     int def_seg;
2183     int base;
2184     int index;
2185     int scale;
2186     target_long disp;
2187 } AddressParts;
2188 
2189 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
2190                                     int modrm)
2191 {
2192     int def_seg, base, index, scale, mod, rm;
2193     target_long disp;
2194     bool havesib;
2195 
2196     def_seg = R_DS;
2197     index = -1;
2198     scale = 0;
2199     disp = 0;
2200 
2201     mod = (modrm >> 6) & 3;
2202     rm = modrm & 7;
2203     base = rm | REX_B(s);
2204 
2205     if (mod == 3) {
2206         /* Normally filtered out earlier, but including this path
2207            simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
2208         goto done;
2209     }
2210 
2211     switch (s->aflag) {
2212     case MO_64:
2213     case MO_32:
2214         havesib = 0;
2215         if (rm == 4) {
2216             int code = x86_ldub_code(env, s);
2217             scale = (code >> 6) & 3;
2218             index = ((code >> 3) & 7) | REX_X(s);
2219             if (index == 4) {
2220                 index = -1;  /* no index */
2221             }
2222             base = (code & 7) | REX_B(s);
2223             havesib = 1;
2224         }
2225 
2226         switch (mod) {
2227         case 0:
2228             if ((base & 7) == 5) {
2229                 base = -1;
2230                 disp = (int32_t)x86_ldl_code(env, s);
2231                 if (CODE64(s) && !havesib) {
2232                     base = -2;
2233                     disp += s->pc + s->rip_offset;
2234                 }
2235             }
2236             break;
2237         case 1:
2238             disp = (int8_t)x86_ldub_code(env, s);
2239             break;
2240         default:
2241         case 2:
2242             disp = (int32_t)x86_ldl_code(env, s);
2243             break;
2244         }
2245 
2246         /* For correct popl handling with esp.  */
2247         if (base == R_ESP && s->popl_esp_hack) {
2248             disp += s->popl_esp_hack;
2249         }
2250         if (base == R_EBP || base == R_ESP) {
2251             def_seg = R_SS;
2252         }
2253         break;
2254 
2255     case MO_16:
2256         if (mod == 0) {
2257             if (rm == 6) {
2258                 base = -1;
2259                 disp = x86_lduw_code(env, s);
2260                 break;
2261             }
2262         } else if (mod == 1) {
2263             disp = (int8_t)x86_ldub_code(env, s);
2264         } else {
2265             disp = (int16_t)x86_lduw_code(env, s);
2266         }
2267 
2268         switch (rm) {
2269         case 0:
2270             base = R_EBX;
2271             index = R_ESI;
2272             break;
2273         case 1:
2274             base = R_EBX;
2275             index = R_EDI;
2276             break;
2277         case 2:
2278             base = R_EBP;
2279             index = R_ESI;
2280             def_seg = R_SS;
2281             break;
2282         case 3:
2283             base = R_EBP;
2284             index = R_EDI;
2285             def_seg = R_SS;
2286             break;
2287         case 4:
2288             base = R_ESI;
2289             break;
2290         case 5:
2291             base = R_EDI;
2292             break;
2293         case 6:
2294             base = R_EBP;
2295             def_seg = R_SS;
2296             break;
2297         default:
2298         case 7:
2299             base = R_EBX;
2300             break;
2301         }
2302         break;
2303 
2304     default:
2305         g_assert_not_reached();
2306     }
2307 
2308  done:
2309     return (AddressParts){ def_seg, base, index, scale, disp };
2310 }
2311 
2312 /* Compute the address, with a minimum number of TCG ops.  */
2313 static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
2314 {
2315     TCGv ea = NULL;
2316 
2317     if (a.index >= 0 && !is_vsib) {
2318         if (a.scale == 0) {
2319             ea = cpu_regs[a.index];
2320         } else {
2321             tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
2322             ea = s->A0;
2323         }
2324         if (a.base >= 0) {
2325             tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
2326             ea = s->A0;
2327         }
2328     } else if (a.base >= 0) {
2329         ea = cpu_regs[a.base];
2330     }
2331     if (!ea) {
2332         if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
2333             /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2334             tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
2335         } else {
2336             tcg_gen_movi_tl(s->A0, a.disp);
2337         }
2338         ea = s->A0;
2339     } else if (a.disp != 0) {
2340         tcg_gen_addi_tl(s->A0, ea, a.disp);
2341         ea = s->A0;
2342     }
2343 
2344     return ea;
2345 }
2346 
2347 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
2348 {
2349     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2350     TCGv ea = gen_lea_modrm_1(s, a, false);
2351     gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
2352 }
2353 
2354 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2355 {
2356     (void)gen_lea_modrm_0(env, s, modrm);
2357 }
2358 
2359 /* Used for BNDCL, BNDCU, BNDCN.  */
2360 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
2361                       TCGCond cond, TCGv_i64 bndv)
2362 {
2363     AddressParts a = gen_lea_modrm_0(env, s, modrm);
2364     TCGv ea = gen_lea_modrm_1(s, a, false);
2365 
2366     tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
2367     if (!CODE64(s)) {
2368         tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
2369     }
2370     tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
2371     tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
2372     gen_helper_bndck(tcg_env, s->tmp2_i32);
2373 }
2374 
2375 /* used for LEA and MOV AX, mem */
2376 static void gen_add_A0_ds_seg(DisasContext *s)
2377 {
2378     gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
2379 }
2380 
2381 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2382    OR_TMP0 */
2383 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2384                            MemOp ot, int reg, int is_store)
2385 {
2386     int mod, rm;
2387 
2388     mod = (modrm >> 6) & 3;
2389     rm = (modrm & 7) | REX_B(s);
2390     if (mod == 3) {
2391         if (is_store) {
2392             if (reg != OR_TMP0)
2393                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2394             gen_op_mov_reg_v(s, ot, rm, s->T0);
2395         } else {
2396             gen_op_mov_v_reg(s, ot, s->T0, rm);
2397             if (reg != OR_TMP0)
2398                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2399         }
2400     } else {
2401         gen_lea_modrm(env, s, modrm);
2402         if (is_store) {
2403             if (reg != OR_TMP0)
2404                 gen_op_mov_v_reg(s, ot, s->T0, reg);
2405             gen_op_st_v(s, ot, s->T0, s->A0);
2406         } else {
2407             gen_op_ld_v(s, ot, s->T0, s->A0);
2408             if (reg != OR_TMP0)
2409                 gen_op_mov_reg_v(s, ot, reg, s->T0);
2410         }
2411     }
2412 }
2413 
2414 static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
2415 {
2416     target_ulong ret;
2417 
2418     switch (ot) {
2419     case MO_8:
2420         ret = x86_ldub_code(env, s);
2421         break;
2422     case MO_16:
2423         ret = x86_lduw_code(env, s);
2424         break;
2425     case MO_32:
2426         ret = x86_ldl_code(env, s);
2427         break;
2428 #ifdef TARGET_X86_64
2429     case MO_64:
2430         ret = x86_ldq_code(env, s);
2431         break;
2432 #endif
2433     default:
2434         g_assert_not_reached();
2435     }
2436     return ret;
2437 }
2438 
2439 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
2440 {
2441     uint32_t ret;
2442 
2443     switch (ot) {
2444     case MO_8:
2445         ret = x86_ldub_code(env, s);
2446         break;
2447     case MO_16:
2448         ret = x86_lduw_code(env, s);
2449         break;
2450     case MO_32:
2451 #ifdef TARGET_X86_64
2452     case MO_64:
2453 #endif
2454         ret = x86_ldl_code(env, s);
2455         break;
2456     default:
2457         g_assert_not_reached();
2458     }
2459     return ret;
2460 }
2461 
2462 static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
2463 {
2464     target_long ret;
2465 
2466     switch (ot) {
2467     case MO_8:
2468         ret = (int8_t) x86_ldub_code(env, s);
2469         break;
2470     case MO_16:
2471         ret = (int16_t) x86_lduw_code(env, s);
2472         break;
2473     case MO_32:
2474         ret = (int32_t) x86_ldl_code(env, s);
2475         break;
2476 #ifdef TARGET_X86_64
2477     case MO_64:
2478         ret = x86_ldq_code(env, s);
2479         break;
2480 #endif
2481     default:
2482         g_assert_not_reached();
2483     }
2484     return ret;
2485 }
2486 
2487 static inline int insn_const_size(MemOp ot)
2488 {
2489     if (ot <= MO_32) {
2490         return 1 << ot;
2491     } else {
2492         return 4;
2493     }
2494 }
2495 
2496 static void gen_jcc(DisasContext *s, int b, int diff)
2497 {
2498     TCGLabel *l1 = gen_new_label();
2499 
2500     gen_jcc1(s, b, l1);
2501     gen_jmp_rel_csize(s, 0, 1);
2502     gen_set_label(l1);
2503     gen_jmp_rel(s, s->dflag, diff, 0);
2504 }
2505 
2506 static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
2507 {
2508     CCPrepare cc = gen_prepare_cc(s, b, s->T1);
2509 
2510     if (cc.mask != -1) {
2511         TCGv t0 = tcg_temp_new();
2512         tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2513         cc.reg = t0;
2514     }
2515     if (!cc.use_reg2) {
2516         cc.reg2 = tcg_constant_tl(cc.imm);
2517     }
2518 
2519     tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
2520 }
2521 
2522 static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
2523 {
2524     tcg_gen_ld32u_tl(s->T0, tcg_env,
2525                      offsetof(CPUX86State,segs[seg_reg].selector));
2526 }
2527 
2528 static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
2529 {
2530     tcg_gen_ext16u_tl(s->T0, s->T0);
2531     tcg_gen_st32_tl(s->T0, tcg_env,
2532                     offsetof(CPUX86State,segs[seg_reg].selector));
2533     tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
2534 }
2535 
2536 /* move T0 to seg_reg and compute if the CPU state may change. Never
2537    call this function with seg_reg == R_CS */
2538 static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
2539 {
2540     if (PE(s) && !VM86(s)) {
2541         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
2542         gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
2543         /* abort translation because the addseg value may change or
2544            because ss32 may change. For R_SS, translation must always
2545            stop as a special handling must be done to disable hardware
2546            interrupts for the next instruction */
2547         if (seg_reg == R_SS) {
2548             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2549         } else if (CODE32(s) && seg_reg < R_FS) {
2550             s->base.is_jmp = DISAS_EOB_NEXT;
2551         }
2552     } else {
2553         gen_op_movl_seg_T0_vm(s, seg_reg);
2554         if (seg_reg == R_SS) {
2555             s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
2556         }
2557     }
2558 }
2559 
2560 static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
2561 {
2562     /* no SVM activated; fast case */
2563     if (likely(!GUEST(s))) {
2564         return;
2565     }
2566     gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
2567 }
2568 
2569 static inline void gen_stack_update(DisasContext *s, int addend)
2570 {
2571     gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
2572 }
2573 
2574 /* Generate a push. It depends on ss32, addseg and dflag.  */
2575 static void gen_push_v(DisasContext *s, TCGv val)
2576 {
2577     MemOp d_ot = mo_pushpop(s, s->dflag);
2578     MemOp a_ot = mo_stacksize(s);
2579     int size = 1 << d_ot;
2580     TCGv new_esp = s->A0;
2581 
2582     tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);
2583 
2584     if (!CODE64(s)) {
2585         if (ADDSEG(s)) {
2586             new_esp = tcg_temp_new();
2587             tcg_gen_mov_tl(new_esp, s->A0);
2588         }
2589         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2590     }
2591 
2592     gen_op_st_v(s, d_ot, val, s->A0);
2593     gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
2594 }
2595 
2596 /* two step pop is necessary for precise exceptions */
2597 static MemOp gen_pop_T0(DisasContext *s)
2598 {
2599     MemOp d_ot = mo_pushpop(s, s->dflag);
2600 
2601     gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
2602     gen_op_ld_v(s, d_ot, s->T0, s->T0);
2603 
2604     return d_ot;
2605 }
2606 
2607 static inline void gen_pop_update(DisasContext *s, MemOp ot)
2608 {
2609     gen_stack_update(s, 1 << ot);
2610 }
2611 
2612 static inline void gen_stack_A0(DisasContext *s)
2613 {
2614     gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2615 }
2616 
2617 static void gen_pusha(DisasContext *s)
2618 {
2619     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2620     MemOp d_ot = s->dflag;
2621     int size = 1 << d_ot;
2622     int i;
2623 
2624     for (i = 0; i < 8; i++) {
2625         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
2626         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2627         gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
2628     }
2629 
2630     gen_stack_update(s, -8 * size);
2631 }
2632 
2633 static void gen_popa(DisasContext *s)
2634 {
2635     MemOp s_ot = SS32(s) ? MO_32 : MO_16;
2636     MemOp d_ot = s->dflag;
2637     int size = 1 << d_ot;
2638     int i;
2639 
2640     for (i = 0; i < 8; i++) {
2641         /* ESP is not reloaded */
2642         if (7 - i == R_ESP) {
2643             continue;
2644         }
2645         tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
2646         gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
2647         gen_op_ld_v(s, d_ot, s->T0, s->A0);
2648         gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
2649     }
2650 
2651     gen_stack_update(s, 8 * size);
2652 }
2653 
2654 static void gen_enter(DisasContext *s, int esp_addend, int level)
2655 {
2656     MemOp d_ot = mo_pushpop(s, s->dflag);
2657     MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
2658     int size = 1 << d_ot;
2659 
2660     /* Push BP; compute FrameTemp into T1.  */
2661     tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
2662     gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
2663     gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);
2664 
2665     level &= 31;
2666     if (level != 0) {
2667         int i;
2668 
2669         /* Copy level-1 pointers from the previous frame.  */
2670         for (i = 1; i < level; ++i) {
2671             tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
2672             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2673             gen_op_ld_v(s, d_ot, s->tmp0, s->A0);
2674 
2675             tcg_gen_subi_tl(s->A0, s->T1, size * i);
2676             gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2677             gen_op_st_v(s, d_ot, s->tmp0, s->A0);
2678         }
2679 
2680         /* Push the current FrameTemp as the last level.  */
2681         tcg_gen_subi_tl(s->A0, s->T1, size * level);
2682         gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
2683         gen_op_st_v(s, d_ot, s->T1, s->A0);
2684     }
2685 
2686     /* Copy the FrameTemp value to EBP.  */
2687     gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);
2688 
2689     /* Compute the final value of ESP.  */
2690     tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
2691     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2692 }
2693 
2694 static void gen_leave(DisasContext *s)
2695 {
2696     MemOp d_ot = mo_pushpop(s, s->dflag);
2697     MemOp a_ot = mo_stacksize(s);
2698 
2699     gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2700     gen_op_ld_v(s, d_ot, s->T0, s->A0);
2701 
2702     tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);
2703 
2704     gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
2705     gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
2706 }
2707 
2708 /* Similarly, except that the assumption here is that we don't decode
2709    the instruction at all -- either a missing opcode, an unimplemented
2710    feature, or just a bogus instruction stream.  */
2711 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2712 {
2713     gen_illegal_opcode(s);
2714 
2715     if (qemu_loglevel_mask(LOG_UNIMP)) {
2716         FILE *logfile = qemu_log_trylock();
2717         if (logfile) {
2718             target_ulong pc = s->base.pc_next, end = s->pc;
2719 
2720             fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
2721             for (; pc < end; ++pc) {
2722                 fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
2723             }
2724             fprintf(logfile, "\n");
2725             qemu_log_unlock(logfile);
2726         }
2727     }
2728 }
2729 
2730 /* an interrupt is different from an exception because of the
2731    privilege checks */
2732 static void gen_interrupt(DisasContext *s, int intno)
2733 {
2734     gen_update_cc_op(s);
2735     gen_update_eip_cur(s);
2736     gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
2737                                cur_insn_len_i32(s));
2738     s->base.is_jmp = DISAS_NORETURN;
2739 }
2740 
2741 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2742 {
2743     if ((s->flags & mask) == 0) {
2744         TCGv_i32 t = tcg_temp_new_i32();
2745         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2746         tcg_gen_ori_i32(t, t, mask);
2747         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2748         s->flags |= mask;
2749     }
2750 }
2751 
2752 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2753 {
2754     if (s->flags & mask) {
2755         TCGv_i32 t = tcg_temp_new_i32();
2756         tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2757         tcg_gen_andi_i32(t, t, ~mask);
2758         tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
2759         s->flags &= ~mask;
2760     }
2761 }
2762 
2763 static void gen_set_eflags(DisasContext *s, target_ulong mask)
2764 {
2765     TCGv t = tcg_temp_new();
2766 
2767     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2768     tcg_gen_ori_tl(t, t, mask);
2769     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2770 }
2771 
2772 static void gen_reset_eflags(DisasContext *s, target_ulong mask)
2773 {
2774     TCGv t = tcg_temp_new();
2775 
2776     tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2777     tcg_gen_andi_tl(t, t, ~mask);
2778     tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
2779 }
2780 
2781 /* Clear BND registers during legacy branches.  */
2782 static void gen_bnd_jmp(DisasContext *s)
2783 {
2784     /* Clear the registers only if BND prefix is missing, MPX is enabled,
2785        and if the BNDREGs are known to be in use (non-zero) already.
2786        The helper itself will check BNDPRESERVE at runtime.  */
2787     if ((s->prefix & PREFIX_REPNZ) == 0
2788         && (s->flags & HF_MPX_EN_MASK) != 0
2789         && (s->flags & HF_MPX_IU_MASK) != 0) {
2790         gen_helper_bnd_jmp(tcg_env);
2791     }
2792 }
2793 
2794 /* Generate an end of block. Trace exception is also generated if needed.
2795    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2796    If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2797    S->TF.  This is used by the syscall/sysret insns.  */
2798 static void
2799 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
2800 {
2801     gen_update_cc_op(s);
2802 
2803     /* If several instructions disable interrupts, only the first does it.  */
2804     if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2805         gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2806     } else {
2807         gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2808     }
2809 
2810     if (s->base.tb->flags & HF_RF_MASK) {
2811         gen_reset_eflags(s, RF_MASK);
2812     }
2813     if (recheck_tf) {
2814         gen_helper_rechecking_single_step(tcg_env);
2815         tcg_gen_exit_tb(NULL, 0);
2816     } else if (s->flags & HF_TF_MASK) {
2817         gen_helper_single_step(tcg_env);
2818     } else if (jr) {
2819         tcg_gen_lookup_and_goto_ptr();
2820     } else {
2821         tcg_gen_exit_tb(NULL, 0);
2822     }
2823     s->base.is_jmp = DISAS_NORETURN;
2824 }
2825 
2826 static inline void
2827 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2828 {
2829     do_gen_eob_worker(s, inhibit, recheck_tf, false);
2830 }
2831 
2832 /* End of block.
2833    If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
2834 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2835 {
2836     gen_eob_worker(s, inhibit, false);
2837 }
2838 
2839 /* End of block, resetting the inhibit irq flag.  */
2840 static void gen_eob(DisasContext *s)
2841 {
2842     gen_eob_worker(s, false, false);
2843 }
2844 
2845 /* Jump to register */
2846 static void gen_jr(DisasContext *s)
2847 {
2848     do_gen_eob_worker(s, false, false, true);
2849 }
2850 
2851 /* Jump to eip+diff, truncating the result to OT. */
2852 static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
2853 {
2854     bool use_goto_tb = s->jmp_opt;
2855     target_ulong mask = -1;
2856     target_ulong new_pc = s->pc + diff;
2857     target_ulong new_eip = new_pc - s->cs_base;
2858 
2859     /* In 64-bit mode, operand size is fixed at 64 bits. */
2860     if (!CODE64(s)) {
2861         if (ot == MO_16) {
2862             mask = 0xffff;
2863             if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
2864                 use_goto_tb = false;
2865             }
2866         } else {
2867             mask = 0xffffffff;
2868         }
2869     }
2870     new_eip &= mask;
2871 
2872     gen_update_cc_op(s);
2873     set_cc_op(s, CC_OP_DYNAMIC);
2874 
2875     if (tb_cflags(s->base.tb) & CF_PCREL) {
2876         tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
2877         /*
2878          * If we can prove the branch does not leave the page and we have
2879          * no extra masking to apply (data16 branch in code32, see above),
2880          * then we have also proven that the addition does not wrap.
2881          */
2882         if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
2883             tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
2884             use_goto_tb = false;
2885         }
2886     } else if (!CODE64(s)) {
2887         new_pc = (uint32_t)(new_eip + s->cs_base);
2888     }
2889 
2890     if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
2891         /* jump to same page: we can use a direct jump */
2892         tcg_gen_goto_tb(tb_num);
2893         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2894             tcg_gen_movi_tl(cpu_eip, new_eip);
2895         }
2896         tcg_gen_exit_tb(s->base.tb, tb_num);
2897         s->base.is_jmp = DISAS_NORETURN;
2898     } else {
2899         if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
2900             tcg_gen_movi_tl(cpu_eip, new_eip);
2901         }
2902         if (s->jmp_opt) {
2903             gen_jr(s);   /* jump to another page */
2904         } else {
2905             gen_eob(s);  /* exit to main loop */
2906         }
2907     }
2908 }
2909 
2910 /* Jump to eip+diff, truncating to the current code size. */
2911 static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
2912 {
2913     /* CODE64 ignores the OT argument, so we need not consider it. */
2914     gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
2915 }
2916 
2917 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2918 {
2919     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2920     tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
2921 }
2922 
2923 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2924 {
2925     tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
2926     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
2927 }
2928 
2929 static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
2930 {
2931     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2932                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2933     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2934     int mem_index = s->mem_index;
2935     TCGv_i128 t = tcg_temp_new_i128();
2936 
2937     tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
2938     tcg_gen_st_i128(t, tcg_env, offset);
2939 }
2940 
2941 static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
2942 {
2943     MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
2944                   ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
2945     MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
2946     int mem_index = s->mem_index;
2947     TCGv_i128 t = tcg_temp_new_i128();
2948 
2949     tcg_gen_ld_i128(t, tcg_env, offset);
2950     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
2951 }
2952 
2953 static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
2954 {
2955     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2956     int mem_index = s->mem_index;
2957     TCGv_i128 t0 = tcg_temp_new_i128();
2958     TCGv_i128 t1 = tcg_temp_new_i128();
2959 
2960     tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2961     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2962     tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);
2963 
2964     tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2965     tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2966 }
2967 
2968 static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
2969 {
2970     MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
2971     int mem_index = s->mem_index;
2972     TCGv_i128 t = tcg_temp_new_i128();
2973 
2974     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
2975     tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
2976     tcg_gen_addi_tl(s->tmp0, s->A0, 16);
2977     tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
2978     tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
2979 }
2980 
2981 #include "decode-new.h"
2982 #include "emit.c.inc"
2983 #include "decode-new.c.inc"
2984 
2985 static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
2986 {
2987     TCGv_i64 cmp, val, old;
2988     TCGv Z;
2989 
2990     gen_lea_modrm(env, s, modrm);
2991 
2992     cmp = tcg_temp_new_i64();
2993     val = tcg_temp_new_i64();
2994     old = tcg_temp_new_i64();
2995 
2996     /* Construct the comparison values from the register pair. */
2997     tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
2998     tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
2999 
3000     /* Only require atomic with LOCK; non-parallel handled in generator. */
3001     if (s->prefix & PREFIX_LOCK) {
3002         tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
3003     } else {
3004         tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
3005                                       s->mem_index, MO_TEUQ);
3006     }
3007 
3008     /* Set tmp0 to match the required value of Z. */
3009     tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
3010     Z = tcg_temp_new();
3011     tcg_gen_trunc_i64_tl(Z, cmp);
3012 
3013     /*
3014      * Extract the result values for the register pair.
3015      * For 32-bit, we may do this unconditionally, because on success (Z=1),
3016      * the old value matches the previous value in EDX:EAX.  For x86_64,
3017      * the store must be conditional, because we must leave the source
3018      * registers unchanged on success, and zero-extend the writeback
3019      * on failure (Z=0).
3020      */
3021     if (TARGET_LONG_BITS == 32) {
3022         tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
3023     } else {
3024         TCGv zero = tcg_constant_tl(0);
3025 
3026         tcg_gen_extr_i64_tl(s->T0, s->T1, old);
3027         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
3028                            s->T0, cpu_regs[R_EAX]);
3029         tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
3030                            s->T1, cpu_regs[R_EDX]);
3031     }
3032 
3033     /* Update Z. */
3034     gen_compute_eflags(s);
3035     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
3036 }
3037 
3038 #ifdef TARGET_X86_64
3039 static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
3040 {
3041     MemOp mop = MO_TE | MO_128 | MO_ALIGN;
3042     TCGv_i64 t0, t1;
3043     TCGv_i128 cmp, val;
3044 
3045     gen_lea_modrm(env, s, modrm);
3046 
3047     cmp = tcg_temp_new_i128();
3048     val = tcg_temp_new_i128();
3049     tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
3050     tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);
3051 
3052     /* Only require atomic with LOCK; non-parallel handled in generator. */
3053     if (s->prefix & PREFIX_LOCK) {
3054         tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3055     } else {
3056         tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
3057     }
3058 
3059     tcg_gen_extr_i128_i64(s->T0, s->T1, val);
3060 
3061     /* Determine success after the fact. */
3062     t0 = tcg_temp_new_i64();
3063     t1 = tcg_temp_new_i64();
3064     tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
3065     tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
3066     tcg_gen_or_i64(t0, t0, t1);
3067 
3068     /* Update Z. */
3069     gen_compute_eflags(s);
3070     tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
3071     tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);
3072 
3073     /*
3074      * Extract the result values for the register pair.  We may do this
3075      * unconditionally, because on success (Z=1), the old value matches
3076      * the previous value in RDX:RAX.
3077      */
3078     tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
3079     tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
3080 }
3081 #endif
3082 
3083 /* convert one instruction. s->base.is_jmp is set if the translation must
3084    be stopped. Return the next pc value */
3085 static bool disas_insn(DisasContext *s, CPUState *cpu)
3086 {
3087     CPUX86State *env = cpu_env(cpu);
3088     int b, prefixes;
3089     int shift;
3090     MemOp ot, aflag, dflag;
3091     int modrm, reg, rm, mod, op, opreg, val;
3092     bool orig_cc_op_dirty = s->cc_op_dirty;
3093     CCOp orig_cc_op = s->cc_op;
3094     target_ulong orig_pc_save = s->pc_save;
3095 
3096     s->pc = s->base.pc_next;
3097     s->override = -1;
3098 #ifdef TARGET_X86_64
3099     s->rex_r = 0;
3100     s->rex_x = 0;
3101     s->rex_b = 0;
3102 #endif
3103     s->rip_offset = 0; /* for relative ip address */
3104     s->vex_l = 0;
3105     s->vex_v = 0;
3106     s->vex_w = false;
3107     switch (sigsetjmp(s->jmpbuf, 0)) {
3108     case 0:
3109         break;
3110     case 1:
3111         gen_exception_gpf(s);
3112         return true;
3113     case 2:
3114         /* Restore state that may affect the next instruction. */
3115         s->pc = s->base.pc_next;
3116         /*
3117          * TODO: These save/restore can be removed after the table-based
3118          * decoder is complete; we will be decoding the insn completely
3119          * before any code generation that might affect these variables.
3120          */
3121         s->cc_op_dirty = orig_cc_op_dirty;
3122         s->cc_op = orig_cc_op;
3123         s->pc_save = orig_pc_save;
3124         /* END TODO */
3125         s->base.num_insns--;
3126         tcg_remove_ops_after(s->prev_insn_end);
3127         s->base.insn_start = s->prev_insn_start;
3128         s->base.is_jmp = DISAS_TOO_MANY;
3129         return false;
3130     default:
3131         g_assert_not_reached();
3132     }
3133 
3134     prefixes = 0;
3135 
3136  next_byte:
3137     s->prefix = prefixes;
3138     b = x86_ldub_code(env, s);
3139     /* Collect prefixes.  */
3140     switch (b) {
3141     default:
3142         break;
3143     case 0x0f:
3144         b = x86_ldub_code(env, s) + 0x100;
3145         break;
3146     case 0xf3:
3147         prefixes |= PREFIX_REPZ;
3148         prefixes &= ~PREFIX_REPNZ;
3149         goto next_byte;
3150     case 0xf2:
3151         prefixes |= PREFIX_REPNZ;
3152         prefixes &= ~PREFIX_REPZ;
3153         goto next_byte;
3154     case 0xf0:
3155         prefixes |= PREFIX_LOCK;
3156         goto next_byte;
3157     case 0x2e:
3158         s->override = R_CS;
3159         goto next_byte;
3160     case 0x36:
3161         s->override = R_SS;
3162         goto next_byte;
3163     case 0x3e:
3164         s->override = R_DS;
3165         goto next_byte;
3166     case 0x26:
3167         s->override = R_ES;
3168         goto next_byte;
3169     case 0x64:
3170         s->override = R_FS;
3171         goto next_byte;
3172     case 0x65:
3173         s->override = R_GS;
3174         goto next_byte;
3175     case 0x66:
3176         prefixes |= PREFIX_DATA;
3177         goto next_byte;
3178     case 0x67:
3179         prefixes |= PREFIX_ADR;
3180         goto next_byte;
3181 #ifdef TARGET_X86_64
3182     case 0x40 ... 0x4f:
3183         if (CODE64(s)) {
3184             /* REX prefix */
3185             prefixes |= PREFIX_REX;
3186             s->vex_w = (b >> 3) & 1;
3187             s->rex_r = (b & 0x4) << 1;
3188             s->rex_x = (b & 0x2) << 2;
3189             s->rex_b = (b & 0x1) << 3;
3190             goto next_byte;
3191         }
3192         break;
3193 #endif
3194     case 0xc5: /* 2-byte VEX */
3195     case 0xc4: /* 3-byte VEX */
3196         if (CODE32(s) && !VM86(s)) {
3197             int vex2 = x86_ldub_code(env, s);
3198             s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
3199 
3200             if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
3201                 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
3202                    otherwise the instruction is LES or LDS.  */
3203                 break;
3204             }
3205             disas_insn_new(s, cpu, b);
3206             return s->pc;
3207         }
3208         break;
3209     }
3210 
3211     /* Post-process prefixes.  */
3212     if (CODE64(s)) {
3213         /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
3214            data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
3215            over 0x66 if both are present.  */
3216         dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
3217         /* In 64-bit mode, 0x67 selects 32-bit addressing.  */
3218         aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
3219     } else {
3220         /* In 16/32-bit mode, 0x66 selects the opposite data size.  */
3221         if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
3222             dflag = MO_32;
3223         } else {
3224             dflag = MO_16;
3225         }
3226         /* In 16/32-bit mode, 0x67 selects the opposite addressing.  */
3227         if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
3228             aflag = MO_32;
3229         }  else {
3230             aflag = MO_16;
3231         }
3232     }
3233 
3234     s->prefix = prefixes;
3235     s->aflag = aflag;
3236     s->dflag = dflag;
3237 
3238     /* now check op code */
3239     switch (b) {
3240         /**************************/
3241         /* arith & logic */
3242     case 0x00 ... 0x05:
3243     case 0x08 ... 0x0d:
3244     case 0x10 ... 0x15:
3245     case 0x18 ... 0x1d:
3246     case 0x20 ... 0x25:
3247     case 0x28 ... 0x2d:
3248     case 0x30 ... 0x35:
3249     case 0x38 ... 0x3d:
3250         {
3251             int f;
3252             op = (b >> 3) & 7;
3253             f = (b >> 1) & 3;
3254 
3255             ot = mo_b_d(b, dflag);
3256 
3257             switch(f) {
3258             case 0: /* OP Ev, Gv */
3259                 modrm = x86_ldub_code(env, s);
3260                 reg = ((modrm >> 3) & 7) | REX_R(s);
3261                 mod = (modrm >> 6) & 3;
3262                 rm = (modrm & 7) | REX_B(s);
3263                 if (mod != 3) {
3264                     gen_lea_modrm(env, s, modrm);
3265                     opreg = OR_TMP0;
3266                 } else if (op == OP_XORL && rm == reg) {
3267                 xor_zero:
3268                     /* xor reg, reg optimisation */
3269                     set_cc_op(s, CC_OP_CLR);
3270                     tcg_gen_movi_tl(s->T0, 0);
3271                     gen_op_mov_reg_v(s, ot, reg, s->T0);
3272                     break;
3273                 } else {
3274                     opreg = rm;
3275                 }
3276                 gen_op_mov_v_reg(s, ot, s->T1, reg);
3277                 gen_op(s, op, ot, opreg);
3278                 break;
3279             case 1: /* OP Gv, Ev */
3280                 modrm = x86_ldub_code(env, s);
3281                 mod = (modrm >> 6) & 3;
3282                 reg = ((modrm >> 3) & 7) | REX_R(s);
3283                 rm = (modrm & 7) | REX_B(s);
3284                 if (mod != 3) {
3285                     gen_lea_modrm(env, s, modrm);
3286                     gen_op_ld_v(s, ot, s->T1, s->A0);
3287                 } else if (op == OP_XORL && rm == reg) {
3288                     goto xor_zero;
3289                 } else {
3290                     gen_op_mov_v_reg(s, ot, s->T1, rm);
3291                 }
3292                 gen_op(s, op, ot, reg);
3293                 break;
3294             case 2: /* OP A, Iv */
3295                 val = insn_get(env, s, ot);
3296                 tcg_gen_movi_tl(s->T1, val);
3297                 gen_op(s, op, ot, OR_EAX);
3298                 break;
3299             }
3300         }
3301         break;
3302 
3303     case 0x82:
3304         if (CODE64(s))
3305             goto illegal_op;
3306         /* fall through */
3307     case 0x80: /* GRP1 */
3308     case 0x81:
3309     case 0x83:
3310         {
3311             ot = mo_b_d(b, dflag);
3312 
3313             modrm = x86_ldub_code(env, s);
3314             mod = (modrm >> 6) & 3;
3315             rm = (modrm & 7) | REX_B(s);
3316             op = (modrm >> 3) & 7;
3317 
3318             if (mod != 3) {
3319                 if (b == 0x83)
3320                     s->rip_offset = 1;
3321                 else
3322                     s->rip_offset = insn_const_size(ot);
3323                 gen_lea_modrm(env, s, modrm);
3324                 opreg = OR_TMP0;
3325             } else {
3326                 opreg = rm;
3327             }
3328 
3329             switch(b) {
3330             default:
3331             case 0x80:
3332             case 0x81:
3333             case 0x82:
3334                 val = insn_get(env, s, ot);
3335                 break;
3336             case 0x83:
3337                 val = (int8_t)insn_get(env, s, MO_8);
3338                 break;
3339             }
3340             tcg_gen_movi_tl(s->T1, val);
3341             gen_op(s, op, ot, opreg);
3342         }
3343         break;
3344 
3345         /**************************/
3346         /* inc, dec, and other misc arith */
3347     case 0x40 ... 0x47: /* inc Gv */
3348         ot = dflag;
3349         gen_inc(s, ot, OR_EAX + (b & 7), 1);
3350         break;
3351     case 0x48 ... 0x4f: /* dec Gv */
3352         ot = dflag;
3353         gen_inc(s, ot, OR_EAX + (b & 7), -1);
3354         break;
3355     case 0xf6: /* GRP3 */
3356     case 0xf7:
3357         ot = mo_b_d(b, dflag);
3358 
3359         modrm = x86_ldub_code(env, s);
3360         mod = (modrm >> 6) & 3;
3361         rm = (modrm & 7) | REX_B(s);
3362         op = (modrm >> 3) & 7;
3363         if (mod != 3) {
3364             if (op == 0) {
3365                 s->rip_offset = insn_const_size(ot);
3366             }
3367             gen_lea_modrm(env, s, modrm);
3368             /* For those below that handle locked memory, don't load here.  */
3369             if (!(s->prefix & PREFIX_LOCK)
3370                 || op != 2) {
3371                 gen_op_ld_v(s, ot, s->T0, s->A0);
3372             }
3373         } else {
3374             gen_op_mov_v_reg(s, ot, s->T0, rm);
3375         }
3376 
3377         switch(op) {
3378         case 0: /* test */
3379             val = insn_get(env, s, ot);
3380             tcg_gen_movi_tl(s->T1, val);
3381             gen_op_testl_T0_T1_cc(s);
3382             set_cc_op(s, CC_OP_LOGICB + ot);
3383             break;
3384         case 2: /* not */
3385             if (s->prefix & PREFIX_LOCK) {
3386                 if (mod == 3) {
3387                     goto illegal_op;
3388                 }
3389                 tcg_gen_movi_tl(s->T0, ~0);
3390                 tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
3391                                             s->mem_index, ot | MO_LE);
3392             } else {
3393                 tcg_gen_not_tl(s->T0, s->T0);
3394                 if (mod != 3) {
3395                     gen_op_st_v(s, ot, s->T0, s->A0);
3396                 } else {
3397                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3398                 }
3399             }
3400             break;
3401         case 3: /* neg */
3402             if (s->prefix & PREFIX_LOCK) {
3403                 TCGLabel *label1;
3404                 TCGv a0, t0, t1, t2;
3405 
3406                 if (mod == 3) {
3407                     goto illegal_op;
3408                 }
3409                 a0 = s->A0;
3410                 t0 = s->T0;
3411                 label1 = gen_new_label();
3412 
3413                 gen_set_label(label1);
3414                 t1 = tcg_temp_new();
3415                 t2 = tcg_temp_new();
3416                 tcg_gen_mov_tl(t2, t0);
3417                 tcg_gen_neg_tl(t1, t0);
3418                 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
3419                                           s->mem_index, ot | MO_LE);
3420                 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
3421 
3422                 tcg_gen_neg_tl(s->T0, t0);
3423             } else {
3424                 tcg_gen_neg_tl(s->T0, s->T0);
3425                 if (mod != 3) {
3426                     gen_op_st_v(s, ot, s->T0, s->A0);
3427                 } else {
3428                     gen_op_mov_reg_v(s, ot, rm, s->T0);
3429                 }
3430             }
3431             gen_op_update_neg_cc(s);
3432             set_cc_op(s, CC_OP_SUBB + ot);
3433             break;
3434         case 4: /* mul */
3435             switch(ot) {
3436             case MO_8:
3437                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3438                 tcg_gen_ext8u_tl(s->T0, s->T0);
3439                 tcg_gen_ext8u_tl(s->T1, s->T1);
3440                 /* XXX: use 32 bit mul which could be faster */
3441                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3442                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3443                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3444                 tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
3445                 set_cc_op(s, CC_OP_MULB);
3446                 break;
3447             case MO_16:
3448                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3449                 tcg_gen_ext16u_tl(s->T0, s->T0);
3450                 tcg_gen_ext16u_tl(s->T1, s->T1);
3451                 /* XXX: use 32 bit mul which could be faster */
3452                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3453                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3454                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3455                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3456                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3457                 tcg_gen_mov_tl(cpu_cc_src, s->T0);
3458                 set_cc_op(s, CC_OP_MULW);
3459                 break;
3460             default:
3461             case MO_32:
3462                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3463                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3464                 tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
3465                                   s->tmp2_i32, s->tmp3_i32);
3466                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3467                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3468                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3469                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3470                 set_cc_op(s, CC_OP_MULL);
3471                 break;
3472 #ifdef TARGET_X86_64
3473             case MO_64:
3474                 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3475                                   s->T0, cpu_regs[R_EAX]);
3476                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3477                 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
3478                 set_cc_op(s, CC_OP_MULQ);
3479                 break;
3480 #endif
3481             }
3482             break;
3483         case 5: /* imul */
3484             switch(ot) {
3485             case MO_8:
3486                 gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
3487                 tcg_gen_ext8s_tl(s->T0, s->T0);
3488                 tcg_gen_ext8s_tl(s->T1, s->T1);
3489                 /* XXX: use 32 bit mul which could be faster */
3490                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3491                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3492                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3493                 tcg_gen_ext8s_tl(s->tmp0, s->T0);
3494                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3495                 set_cc_op(s, CC_OP_MULB);
3496                 break;
3497             case MO_16:
3498                 gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
3499                 tcg_gen_ext16s_tl(s->T0, s->T0);
3500                 tcg_gen_ext16s_tl(s->T1, s->T1);
3501                 /* XXX: use 32 bit mul which could be faster */
3502                 tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3503                 gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3504                 tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3505                 tcg_gen_ext16s_tl(s->tmp0, s->T0);
3506                 tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3507                 tcg_gen_shri_tl(s->T0, s->T0, 16);
3508                 gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3509                 set_cc_op(s, CC_OP_MULW);
3510                 break;
3511             default:
3512             case MO_32:
3513                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3514                 tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
3515                 tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3516                                   s->tmp2_i32, s->tmp3_i32);
3517                 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
3518                 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
3519                 tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3520                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3521                 tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3522                 tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3523                 set_cc_op(s, CC_OP_MULL);
3524                 break;
3525 #ifdef TARGET_X86_64
3526             case MO_64:
3527                 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
3528                                   s->T0, cpu_regs[R_EAX]);
3529                 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
3530                 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
3531                 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
3532                 set_cc_op(s, CC_OP_MULQ);
3533                 break;
3534 #endif
3535             }
3536             break;
3537         case 6: /* div */
3538             switch(ot) {
3539             case MO_8:
3540                 gen_helper_divb_AL(tcg_env, s->T0);
3541                 break;
3542             case MO_16:
3543                 gen_helper_divw_AX(tcg_env, s->T0);
3544                 break;
3545             default:
3546             case MO_32:
3547                 gen_helper_divl_EAX(tcg_env, s->T0);
3548                 break;
3549 #ifdef TARGET_X86_64
3550             case MO_64:
3551                 gen_helper_divq_EAX(tcg_env, s->T0);
3552                 break;
3553 #endif
3554             }
3555             break;
3556         case 7: /* idiv */
3557             switch(ot) {
3558             case MO_8:
3559                 gen_helper_idivb_AL(tcg_env, s->T0);
3560                 break;
3561             case MO_16:
3562                 gen_helper_idivw_AX(tcg_env, s->T0);
3563                 break;
3564             default:
3565             case MO_32:
3566                 gen_helper_idivl_EAX(tcg_env, s->T0);
3567                 break;
3568 #ifdef TARGET_X86_64
3569             case MO_64:
3570                 gen_helper_idivq_EAX(tcg_env, s->T0);
3571                 break;
3572 #endif
3573             }
3574             break;
3575         default:
3576             goto unknown_op;
3577         }
3578         break;
3579 
3580     case 0xfe: /* GRP4 */
3581     case 0xff: /* GRP5 */
3582         ot = mo_b_d(b, dflag);
3583 
3584         modrm = x86_ldub_code(env, s);
3585         mod = (modrm >> 6) & 3;
3586         rm = (modrm & 7) | REX_B(s);
3587         op = (modrm >> 3) & 7;
3588         if (op >= 2 && b == 0xfe) {
3589             goto unknown_op;
3590         }
3591         if (CODE64(s)) {
3592             if (op == 2 || op == 4) {
3593                 /* operand size for jumps is 64 bit */
3594                 ot = MO_64;
3595             } else if (op == 3 || op == 5) {
3596                 ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
3597             } else if (op == 6) {
3598                 /* default push size is 64 bit */
3599                 ot = mo_pushpop(s, dflag);
3600             }
3601         }
3602         if (mod != 3) {
3603             gen_lea_modrm(env, s, modrm);
3604             if (op >= 2 && op != 3 && op != 5)
3605                 gen_op_ld_v(s, ot, s->T0, s->A0);
3606         } else {
3607             gen_op_mov_v_reg(s, ot, s->T0, rm);
3608         }
3609 
3610         switch(op) {
3611         case 0: /* inc Ev */
3612             if (mod != 3)
3613                 opreg = OR_TMP0;
3614             else
3615                 opreg = rm;
3616             gen_inc(s, ot, opreg, 1);
3617             break;
3618         case 1: /* dec Ev */
3619             if (mod != 3)
3620                 opreg = OR_TMP0;
3621             else
3622                 opreg = rm;
3623             gen_inc(s, ot, opreg, -1);
3624             break;
3625         case 2: /* call Ev */
3626             /* XXX: optimize if memory (no 'and' is necessary) */
3627             if (dflag == MO_16) {
3628                 tcg_gen_ext16u_tl(s->T0, s->T0);
3629             }
3630             gen_push_v(s, eip_next_tl(s));
3631             gen_op_jmp_v(s, s->T0);
3632             gen_bnd_jmp(s);
3633             s->base.is_jmp = DISAS_JUMP;
3634             break;
3635         case 3: /* lcall Ev */
3636             if (mod == 3) {
3637                 goto illegal_op;
3638             }
3639             gen_op_ld_v(s, ot, s->T1, s->A0);
3640             gen_add_A0_im(s, 1 << ot);
3641             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3642         do_lcall:
3643             if (PE(s) && !VM86(s)) {
3644                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3645                 gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
3646                                            tcg_constant_i32(dflag - 1),
3647                                            eip_next_tl(s));
3648             } else {
3649                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3650                 tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3651                 gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
3652                                       tcg_constant_i32(dflag - 1),
3653                                       eip_next_i32(s));
3654             }
3655             s->base.is_jmp = DISAS_JUMP;
3656             break;
3657         case 4: /* jmp Ev */
3658             if (dflag == MO_16) {
3659                 tcg_gen_ext16u_tl(s->T0, s->T0);
3660             }
3661             gen_op_jmp_v(s, s->T0);
3662             gen_bnd_jmp(s);
3663             s->base.is_jmp = DISAS_JUMP;
3664             break;
3665         case 5: /* ljmp Ev */
3666             if (mod == 3) {
3667                 goto illegal_op;
3668             }
3669             gen_op_ld_v(s, ot, s->T1, s->A0);
3670             gen_add_A0_im(s, 1 << ot);
3671             gen_op_ld_v(s, MO_16, s->T0, s->A0);
3672         do_ljmp:
3673             if (PE(s) && !VM86(s)) {
3674                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3675                 gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
3676                                           eip_next_tl(s));
3677             } else {
3678                 gen_op_movl_seg_T0_vm(s, R_CS);
3679                 gen_op_jmp_v(s, s->T1);
3680             }
3681             s->base.is_jmp = DISAS_JUMP;
3682             break;
3683         case 6: /* push Ev */
3684             gen_push_v(s, s->T0);
3685             break;
3686         default:
3687             goto unknown_op;
3688         }
3689         break;
3690 
3691     case 0x84: /* test Ev, Gv */
3692     case 0x85:
3693         ot = mo_b_d(b, dflag);
3694 
3695         modrm = x86_ldub_code(env, s);
3696         reg = ((modrm >> 3) & 7) | REX_R(s);
3697 
3698         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3699         gen_op_mov_v_reg(s, ot, s->T1, reg);
3700         gen_op_testl_T0_T1_cc(s);
3701         set_cc_op(s, CC_OP_LOGICB + ot);
3702         break;
3703 
3704     case 0xa8: /* test eAX, Iv */
3705     case 0xa9:
3706         ot = mo_b_d(b, dflag);
3707         val = insn_get(env, s, ot);
3708 
3709         gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
3710         tcg_gen_movi_tl(s->T1, val);
3711         gen_op_testl_T0_T1_cc(s);
3712         set_cc_op(s, CC_OP_LOGICB + ot);
3713         break;
3714 
3715     case 0x98: /* CWDE/CBW */
3716         switch (dflag) {
3717 #ifdef TARGET_X86_64
3718         case MO_64:
3719             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3720             tcg_gen_ext32s_tl(s->T0, s->T0);
3721             gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
3722             break;
3723 #endif
3724         case MO_32:
3725             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3726             tcg_gen_ext16s_tl(s->T0, s->T0);
3727             gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
3728             break;
3729         case MO_16:
3730             gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
3731             tcg_gen_ext8s_tl(s->T0, s->T0);
3732             gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
3733             break;
3734         default:
3735             g_assert_not_reached();
3736         }
3737         break;
3738     case 0x99: /* CDQ/CWD */
3739         switch (dflag) {
3740 #ifdef TARGET_X86_64
3741         case MO_64:
3742             gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
3743             tcg_gen_sari_tl(s->T0, s->T0, 63);
3744             gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
3745             break;
3746 #endif
3747         case MO_32:
3748             gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
3749             tcg_gen_ext32s_tl(s->T0, s->T0);
3750             tcg_gen_sari_tl(s->T0, s->T0, 31);
3751             gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
3752             break;
3753         case MO_16:
3754             gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
3755             tcg_gen_ext16s_tl(s->T0, s->T0);
3756             tcg_gen_sari_tl(s->T0, s->T0, 15);
3757             gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
3758             break;
3759         default:
3760             g_assert_not_reached();
3761         }
3762         break;
3763     case 0x1af: /* imul Gv, Ev */
3764     case 0x69: /* imul Gv, Ev, I */
3765     case 0x6b:
3766         ot = dflag;
3767         modrm = x86_ldub_code(env, s);
3768         reg = ((modrm >> 3) & 7) | REX_R(s);
3769         if (b == 0x69)
3770             s->rip_offset = insn_const_size(ot);
3771         else if (b == 0x6b)
3772             s->rip_offset = 1;
3773         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3774         if (b == 0x69) {
3775             val = insn_get(env, s, ot);
3776             tcg_gen_movi_tl(s->T1, val);
3777         } else if (b == 0x6b) {
3778             val = (int8_t)insn_get(env, s, MO_8);
3779             tcg_gen_movi_tl(s->T1, val);
3780         } else {
3781             gen_op_mov_v_reg(s, ot, s->T1, reg);
3782         }
3783         switch (ot) {
3784 #ifdef TARGET_X86_64
3785         case MO_64:
3786             tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
3787             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3788             tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
3789             tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
3790             break;
3791 #endif
3792         case MO_32:
3793             tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
3794             tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
3795             tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
3796                               s->tmp2_i32, s->tmp3_i32);
3797             tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
3798             tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
3799             tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
3800             tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
3801             tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
3802             break;
3803         default:
3804             tcg_gen_ext16s_tl(s->T0, s->T0);
3805             tcg_gen_ext16s_tl(s->T1, s->T1);
3806             /* XXX: use 32 bit mul which could be faster */
3807             tcg_gen_mul_tl(s->T0, s->T0, s->T1);
3808             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
3809             tcg_gen_ext16s_tl(s->tmp0, s->T0);
3810             tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
3811             gen_op_mov_reg_v(s, ot, reg, s->T0);
3812             break;
3813         }
3814         set_cc_op(s, CC_OP_MULB + ot);
3815         break;
3816     case 0x1c0:
3817     case 0x1c1: /* xadd Ev, Gv */
3818         ot = mo_b_d(b, dflag);
3819         modrm = x86_ldub_code(env, s);
3820         reg = ((modrm >> 3) & 7) | REX_R(s);
3821         mod = (modrm >> 6) & 3;
3822         gen_op_mov_v_reg(s, ot, s->T0, reg);
3823         if (mod == 3) {
3824             rm = (modrm & 7) | REX_B(s);
3825             gen_op_mov_v_reg(s, ot, s->T1, rm);
3826             tcg_gen_add_tl(s->T0, s->T0, s->T1);
3827             gen_op_mov_reg_v(s, ot, reg, s->T1);
3828             gen_op_mov_reg_v(s, ot, rm, s->T0);
3829         } else {
3830             gen_lea_modrm(env, s, modrm);
3831             if (s->prefix & PREFIX_LOCK) {
3832                 tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
3833                                             s->mem_index, ot | MO_LE);
3834                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3835             } else {
3836                 gen_op_ld_v(s, ot, s->T1, s->A0);
3837                 tcg_gen_add_tl(s->T0, s->T0, s->T1);
3838                 gen_op_st_v(s, ot, s->T0, s->A0);
3839             }
3840             gen_op_mov_reg_v(s, ot, reg, s->T1);
3841         }
3842         gen_op_update2_cc(s);
3843         set_cc_op(s, CC_OP_ADDB + ot);
3844         break;
3845     case 0x1b0:
3846     case 0x1b1: /* cmpxchg Ev, Gv */
3847         {
3848             TCGv oldv, newv, cmpv, dest;
3849 
3850             ot = mo_b_d(b, dflag);
3851             modrm = x86_ldub_code(env, s);
3852             reg = ((modrm >> 3) & 7) | REX_R(s);
3853             mod = (modrm >> 6) & 3;
3854             oldv = tcg_temp_new();
3855             newv = tcg_temp_new();
3856             cmpv = tcg_temp_new();
3857             gen_op_mov_v_reg(s, ot, newv, reg);
3858             tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
3859             gen_extu(ot, cmpv);
3860             if (s->prefix & PREFIX_LOCK) {
3861                 if (mod == 3) {
3862                     goto illegal_op;
3863                 }
3864                 gen_lea_modrm(env, s, modrm);
3865                 tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
3866                                           s->mem_index, ot | MO_LE);
3867             } else {
3868                 if (mod == 3) {
3869                     rm = (modrm & 7) | REX_B(s);
3870                     gen_op_mov_v_reg(s, ot, oldv, rm);
3871                     gen_extu(ot, oldv);
3872 
3873                     /*
3874                      * Unlike the memory case, where "the destination operand receives
3875                      * a write cycle without regard to the result of the comparison",
3876                      * rm must not be touched altogether if the write fails, including
3877                      * not zero-extending it on 64-bit processors.  So, precompute
3878                      * the result of a successful writeback and perform the movcond
3879                      * directly on cpu_regs.  Also need to write accumulator first, in
3880                      * case rm is part of RAX too.
3881                      */
3882                     dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
3883                     tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
3884                 } else {
3885                     gen_lea_modrm(env, s, modrm);
3886                     gen_op_ld_v(s, ot, oldv, s->A0);
3887 
3888                     /*
3889                      * Perform an unconditional store cycle like physical cpu;
3890                      * must be before changing accumulator to ensure
3891                      * idempotency if the store faults and the instruction
3892                      * is restarted
3893                      */
3894                     tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
3895                     gen_op_st_v(s, ot, newv, s->A0);
3896                 }
3897             }
3898 	    /*
3899 	     * Write EAX only if the cmpxchg fails; reuse newv as the destination,
3900 	     * since it's dead here.
3901 	     */
3902             dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
3903             tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
3904             tcg_gen_mov_tl(cpu_cc_src, oldv);
3905             tcg_gen_mov_tl(s->cc_srcT, cmpv);
3906             tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
3907             set_cc_op(s, CC_OP_SUBB + ot);
3908         }
3909         break;
3910     case 0x1c7: /* cmpxchg8b */
3911         modrm = x86_ldub_code(env, s);
3912         mod = (modrm >> 6) & 3;
3913         switch ((modrm >> 3) & 7) {
3914         case 1: /* CMPXCHG8, CMPXCHG16 */
3915             if (mod == 3) {
3916                 goto illegal_op;
3917             }
3918 #ifdef TARGET_X86_64
3919             if (dflag == MO_64) {
3920                 if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
3921                     goto illegal_op;
3922                 }
3923                 gen_cmpxchg16b(s, env, modrm);
3924                 break;
3925             }
3926 #endif
3927             if (!(s->cpuid_features & CPUID_CX8)) {
3928                 goto illegal_op;
3929             }
3930             gen_cmpxchg8b(s, env, modrm);
3931             break;
3932 
3933         case 7: /* RDSEED, RDPID with f3 prefix */
3934             if (mod != 3 ||
3935                 (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
3936                 goto illegal_op;
3937             }
3938             if (s->prefix & PREFIX_REPZ) {
3939                 if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
3940                     goto illegal_op;
3941                 }
3942                 gen_helper_rdpid(s->T0, tcg_env);
3943                 rm = (modrm & 7) | REX_B(s);
3944                 gen_op_mov_reg_v(s, dflag, rm, s->T0);
3945                 break;
3946             } else {
3947                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
3948                     goto illegal_op;
3949                 }
3950                 goto do_rdrand;
3951             }
3952 
3953         case 6: /* RDRAND */
3954             if (mod != 3 ||
3955                 (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
3956                 !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
3957                 goto illegal_op;
3958             }
3959         do_rdrand:
3960             translator_io_start(&s->base);
3961             gen_helper_rdrand(s->T0, tcg_env);
3962             rm = (modrm & 7) | REX_B(s);
3963             gen_op_mov_reg_v(s, dflag, rm, s->T0);
3964             set_cc_op(s, CC_OP_EFLAGS);
3965             break;
3966 
3967         default:
3968             goto illegal_op;
3969         }
3970         break;
3971 
3972         /**************************/
3973         /* push/pop */
3974     case 0x50 ... 0x57: /* push */
3975         gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
3976         gen_push_v(s, s->T0);
3977         break;
3978     case 0x58 ... 0x5f: /* pop */
3979         ot = gen_pop_T0(s);
3980         /* NOTE: order is important for pop %sp */
3981         gen_pop_update(s, ot);
3982         gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
3983         break;
3984     case 0x60: /* pusha */
3985         if (CODE64(s))
3986             goto illegal_op;
3987         gen_pusha(s);
3988         break;
3989     case 0x61: /* popa */
3990         if (CODE64(s))
3991             goto illegal_op;
3992         gen_popa(s);
3993         break;
3994     case 0x68: /* push Iv */
3995     case 0x6a:
3996         ot = mo_pushpop(s, dflag);
3997         if (b == 0x68)
3998             val = insn_get(env, s, ot);
3999         else
4000             val = (int8_t)insn_get(env, s, MO_8);
4001         tcg_gen_movi_tl(s->T0, val);
4002         gen_push_v(s, s->T0);
4003         break;
4004     case 0x8f: /* pop Ev */
4005         modrm = x86_ldub_code(env, s);
4006         mod = (modrm >> 6) & 3;
4007         ot = gen_pop_T0(s);
4008         if (mod == 3) {
4009             /* NOTE: order is important for pop %sp */
4010             gen_pop_update(s, ot);
4011             rm = (modrm & 7) | REX_B(s);
4012             gen_op_mov_reg_v(s, ot, rm, s->T0);
4013         } else {
4014             /* NOTE: order is important too for MMU exceptions */
4015             s->popl_esp_hack = 1 << ot;
4016             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4017             s->popl_esp_hack = 0;
4018             gen_pop_update(s, ot);
4019         }
4020         break;
4021     case 0xc8: /* enter */
4022         {
4023             int level;
4024             val = x86_lduw_code(env, s);
4025             level = x86_ldub_code(env, s);
4026             gen_enter(s, val, level);
4027         }
4028         break;
4029     case 0xc9: /* leave */
4030         gen_leave(s);
4031         break;
4032     case 0x06: /* push es */
4033     case 0x0e: /* push cs */
4034     case 0x16: /* push ss */
4035     case 0x1e: /* push ds */
4036         if (CODE64(s))
4037             goto illegal_op;
4038         gen_op_movl_T0_seg(s, b >> 3);
4039         gen_push_v(s, s->T0);
4040         break;
4041     case 0x1a0: /* push fs */
4042     case 0x1a8: /* push gs */
4043         gen_op_movl_T0_seg(s, (b >> 3) & 7);
4044         gen_push_v(s, s->T0);
4045         break;
4046     case 0x07: /* pop es */
4047     case 0x17: /* pop ss */
4048     case 0x1f: /* pop ds */
4049         if (CODE64(s))
4050             goto illegal_op;
4051         reg = b >> 3;
4052         ot = gen_pop_T0(s);
4053         gen_movl_seg_T0(s, reg);
4054         gen_pop_update(s, ot);
4055         break;
4056     case 0x1a1: /* pop fs */
4057     case 0x1a9: /* pop gs */
4058         ot = gen_pop_T0(s);
4059         gen_movl_seg_T0(s, (b >> 3) & 7);
4060         gen_pop_update(s, ot);
4061         break;
4062 
4063         /**************************/
4064         /* mov */
4065     case 0x88:
4066     case 0x89: /* mov Gv, Ev */
4067         ot = mo_b_d(b, dflag);
4068         modrm = x86_ldub_code(env, s);
4069         reg = ((modrm >> 3) & 7) | REX_R(s);
4070 
4071         /* generate a generic store */
4072         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
4073         break;
4074     case 0xc6:
4075     case 0xc7: /* mov Ev, Iv */
4076         ot = mo_b_d(b, dflag);
4077         modrm = x86_ldub_code(env, s);
4078         mod = (modrm >> 6) & 3;
4079         if (mod != 3) {
4080             s->rip_offset = insn_const_size(ot);
4081             gen_lea_modrm(env, s, modrm);
4082         }
4083         val = insn_get(env, s, ot);
4084         tcg_gen_movi_tl(s->T0, val);
4085         if (mod != 3) {
4086             gen_op_st_v(s, ot, s->T0, s->A0);
4087         } else {
4088             gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
4089         }
4090         break;
4091     case 0x8a:
4092     case 0x8b: /* mov Ev, Gv */
4093         ot = mo_b_d(b, dflag);
4094         modrm = x86_ldub_code(env, s);
4095         reg = ((modrm >> 3) & 7) | REX_R(s);
4096 
4097         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4098         gen_op_mov_reg_v(s, ot, reg, s->T0);
4099         break;
4100     case 0x8e: /* mov seg, Gv */
4101         modrm = x86_ldub_code(env, s);
4102         reg = (modrm >> 3) & 7;
4103         if (reg >= 6 || reg == R_CS)
4104             goto illegal_op;
4105         gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
4106         gen_movl_seg_T0(s, reg);
4107         break;
4108     case 0x8c: /* mov Gv, seg */
4109         modrm = x86_ldub_code(env, s);
4110         reg = (modrm >> 3) & 7;
4111         mod = (modrm >> 6) & 3;
4112         if (reg >= 6)
4113             goto illegal_op;
4114         gen_op_movl_T0_seg(s, reg);
4115         ot = mod == 3 ? dflag : MO_16;
4116         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4117         break;
4118 
4119     case 0x1b6: /* movzbS Gv, Eb */
4120     case 0x1b7: /* movzwS Gv, Eb */
4121     case 0x1be: /* movsbS Gv, Eb */
4122     case 0x1bf: /* movswS Gv, Eb */
4123         {
4124             MemOp d_ot;
4125             MemOp s_ot;
4126 
4127             /* d_ot is the size of destination */
4128             d_ot = dflag;
4129             /* ot is the size of source */
4130             ot = (b & 1) + MO_8;
4131             /* s_ot is the sign+size of source */
4132             s_ot = b & 8 ? MO_SIGN | ot : ot;
4133 
4134             modrm = x86_ldub_code(env, s);
4135             reg = ((modrm >> 3) & 7) | REX_R(s);
4136             mod = (modrm >> 6) & 3;
4137             rm = (modrm & 7) | REX_B(s);
4138 
4139             if (mod == 3) {
4140                 if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
4141                     tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
4142                 } else {
4143                     gen_op_mov_v_reg(s, ot, s->T0, rm);
4144                     switch (s_ot) {
4145                     case MO_UB:
4146                         tcg_gen_ext8u_tl(s->T0, s->T0);
4147                         break;
4148                     case MO_SB:
4149                         tcg_gen_ext8s_tl(s->T0, s->T0);
4150                         break;
4151                     case MO_UW:
4152                         tcg_gen_ext16u_tl(s->T0, s->T0);
4153                         break;
4154                     default:
4155                     case MO_SW:
4156                         tcg_gen_ext16s_tl(s->T0, s->T0);
4157                         break;
4158                     }
4159                 }
4160                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4161             } else {
4162                 gen_lea_modrm(env, s, modrm);
4163                 gen_op_ld_v(s, s_ot, s->T0, s->A0);
4164                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
4165             }
4166         }
4167         break;
4168 
4169     case 0x8d: /* lea */
4170         modrm = x86_ldub_code(env, s);
4171         mod = (modrm >> 6) & 3;
4172         if (mod == 3)
4173             goto illegal_op;
4174         reg = ((modrm >> 3) & 7) | REX_R(s);
4175         {
4176             AddressParts a = gen_lea_modrm_0(env, s, modrm);
4177             TCGv ea = gen_lea_modrm_1(s, a, false);
4178             gen_lea_v_seg(s, s->aflag, ea, -1, -1);
4179             gen_op_mov_reg_v(s, dflag, reg, s->A0);
4180         }
4181         break;
4182 
4183     case 0xa0: /* mov EAX, Ov */
4184     case 0xa1:
4185     case 0xa2: /* mov Ov, EAX */
4186     case 0xa3:
4187         {
4188             target_ulong offset_addr;
4189 
4190             ot = mo_b_d(b, dflag);
4191             offset_addr = insn_get_addr(env, s, s->aflag);
4192             tcg_gen_movi_tl(s->A0, offset_addr);
4193             gen_add_A0_ds_seg(s);
4194             if ((b & 2) == 0) {
4195                 gen_op_ld_v(s, ot, s->T0, s->A0);
4196                 gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
4197             } else {
4198                 gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
4199                 gen_op_st_v(s, ot, s->T0, s->A0);
4200             }
4201         }
4202         break;
4203     case 0xd7: /* xlat */
4204         tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
4205         tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
4206         tcg_gen_add_tl(s->A0, s->A0, s->T0);
4207         gen_add_A0_ds_seg(s);
4208         gen_op_ld_v(s, MO_8, s->T0, s->A0);
4209         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
4210         break;
4211     case 0xb0 ... 0xb7: /* mov R, Ib */
4212         val = insn_get(env, s, MO_8);
4213         tcg_gen_movi_tl(s->T0, val);
4214         gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
4215         break;
4216     case 0xb8 ... 0xbf: /* mov R, Iv */
4217 #ifdef TARGET_X86_64
4218         if (dflag == MO_64) {
4219             uint64_t tmp;
4220             /* 64 bit case */
4221             tmp = x86_ldq_code(env, s);
4222             reg = (b & 7) | REX_B(s);
4223             tcg_gen_movi_tl(s->T0, tmp);
4224             gen_op_mov_reg_v(s, MO_64, reg, s->T0);
4225         } else
4226 #endif
4227         {
4228             ot = dflag;
4229             val = insn_get(env, s, ot);
4230             reg = (b & 7) | REX_B(s);
4231             tcg_gen_movi_tl(s->T0, val);
4232             gen_op_mov_reg_v(s, ot, reg, s->T0);
4233         }
4234         break;
4235 
4236     case 0x91 ... 0x97: /* xchg R, EAX */
4237     do_xchg_reg_eax:
4238         ot = dflag;
4239         reg = (b & 7) | REX_B(s);
4240         rm = R_EAX;
4241         goto do_xchg_reg;
4242     case 0x86:
4243     case 0x87: /* xchg Ev, Gv */
4244         ot = mo_b_d(b, dflag);
4245         modrm = x86_ldub_code(env, s);
4246         reg = ((modrm >> 3) & 7) | REX_R(s);
4247         mod = (modrm >> 6) & 3;
4248         if (mod == 3) {
4249             rm = (modrm & 7) | REX_B(s);
4250         do_xchg_reg:
4251             gen_op_mov_v_reg(s, ot, s->T0, reg);
4252             gen_op_mov_v_reg(s, ot, s->T1, rm);
4253             gen_op_mov_reg_v(s, ot, rm, s->T0);
4254             gen_op_mov_reg_v(s, ot, reg, s->T1);
4255         } else {
4256             gen_lea_modrm(env, s, modrm);
4257             gen_op_mov_v_reg(s, ot, s->T0, reg);
4258             /* for xchg, lock is implicit */
4259             tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
4260                                    s->mem_index, ot | MO_LE);
4261             gen_op_mov_reg_v(s, ot, reg, s->T1);
4262         }
4263         break;
4264     case 0xc4: /* les Gv */
4265         /* In CODE64 this is VEX3; see above.  */
4266         op = R_ES;
4267         goto do_lxx;
4268     case 0xc5: /* lds Gv */
4269         /* In CODE64 this is VEX2; see above.  */
4270         op = R_DS;
4271         goto do_lxx;
4272     case 0x1b2: /* lss Gv */
4273         op = R_SS;
4274         goto do_lxx;
4275     case 0x1b4: /* lfs Gv */
4276         op = R_FS;
4277         goto do_lxx;
4278     case 0x1b5: /* lgs Gv */
4279         op = R_GS;
4280     do_lxx:
4281         ot = dflag != MO_16 ? MO_32 : MO_16;
4282         modrm = x86_ldub_code(env, s);
4283         reg = ((modrm >> 3) & 7) | REX_R(s);
4284         mod = (modrm >> 6) & 3;
4285         if (mod == 3)
4286             goto illegal_op;
4287         gen_lea_modrm(env, s, modrm);
4288         gen_op_ld_v(s, ot, s->T1, s->A0);
4289         gen_add_A0_im(s, 1 << ot);
4290         /* load the segment first to handle exceptions properly */
4291         gen_op_ld_v(s, MO_16, s->T0, s->A0);
4292         gen_movl_seg_T0(s, op);
4293         /* then put the data */
4294         gen_op_mov_reg_v(s, ot, reg, s->T1);
4295         break;
4296 
4297         /************************/
4298         /* shifts */
4299     case 0xc0:
4300     case 0xc1:
4301         /* shift Ev,Ib */
4302         shift = 2;
4303     grp2:
4304         {
4305             ot = mo_b_d(b, dflag);
4306             modrm = x86_ldub_code(env, s);
4307             mod = (modrm >> 6) & 3;
4308             op = (modrm >> 3) & 7;
4309 
4310             if (mod != 3) {
4311                 if (shift == 2) {
4312                     s->rip_offset = 1;
4313                 }
4314                 gen_lea_modrm(env, s, modrm);
4315                 opreg = OR_TMP0;
4316             } else {
4317                 opreg = (modrm & 7) | REX_B(s);
4318             }
4319 
4320             /* simpler op */
4321             if (shift == 0) {
4322                 gen_shift(s, op, ot, opreg, OR_ECX);
4323             } else {
4324                 if (shift == 2) {
4325                     shift = x86_ldub_code(env, s);
4326                 }
4327                 gen_shifti(s, op, ot, opreg, shift);
4328             }
4329         }
4330         break;
4331     case 0xd0:
4332     case 0xd1:
4333         /* shift Ev,1 */
4334         shift = 1;
4335         goto grp2;
4336     case 0xd2:
4337     case 0xd3:
4338         /* shift Ev,cl */
4339         shift = 0;
4340         goto grp2;
4341 
4342     case 0x1a4: /* shld imm */
4343         op = 0;
4344         shift = 1;
4345         goto do_shiftd;
4346     case 0x1a5: /* shld cl */
4347         op = 0;
4348         shift = 0;
4349         goto do_shiftd;
4350     case 0x1ac: /* shrd imm */
4351         op = 1;
4352         shift = 1;
4353         goto do_shiftd;
4354     case 0x1ad: /* shrd cl */
4355         op = 1;
4356         shift = 0;
4357     do_shiftd:
4358         ot = dflag;
4359         modrm = x86_ldub_code(env, s);
4360         mod = (modrm >> 6) & 3;
4361         rm = (modrm & 7) | REX_B(s);
4362         reg = ((modrm >> 3) & 7) | REX_R(s);
4363         if (mod != 3) {
4364             gen_lea_modrm(env, s, modrm);
4365             opreg = OR_TMP0;
4366         } else {
4367             opreg = rm;
4368         }
4369         gen_op_mov_v_reg(s, ot, s->T1, reg);
4370 
4371         if (shift) {
4372             TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
4373             gen_shiftd_rm_T1(s, ot, opreg, op, imm);
4374         } else {
4375             gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
4376         }
4377         break;
4378 
4379         /************************/
4380         /* floats */
4381     case 0xd8 ... 0xdf:
4382         {
4383             bool update_fip = true;
4384 
4385             if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
4386                 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
4387                 /* XXX: what to do if illegal op ? */
4388                 gen_exception(s, EXCP07_PREX);
4389                 break;
4390             }
4391             modrm = x86_ldub_code(env, s);
4392             mod = (modrm >> 6) & 3;
4393             rm = modrm & 7;
4394             op = ((b & 7) << 3) | ((modrm >> 3) & 7);
4395             if (mod != 3) {
4396                 /* memory op */
4397                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
4398                 TCGv ea = gen_lea_modrm_1(s, a, false);
4399                 TCGv last_addr = tcg_temp_new();
4400                 bool update_fdp = true;
4401 
4402                 tcg_gen_mov_tl(last_addr, ea);
4403                 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
4404 
4405                 switch (op) {
4406                 case 0x00 ... 0x07: /* fxxxs */
4407                 case 0x10 ... 0x17: /* fixxxl */
4408                 case 0x20 ... 0x27: /* fxxxl */
4409                 case 0x30 ... 0x37: /* fixxx */
4410                     {
4411                         int op1;
4412                         op1 = op & 7;
4413 
4414                         switch (op >> 4) {
4415                         case 0:
4416                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4417                                                 s->mem_index, MO_LEUL);
4418                             gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
4419                             break;
4420                         case 1:
4421                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4422                                                 s->mem_index, MO_LEUL);
4423                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4424                             break;
4425                         case 2:
4426                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4427                                                 s->mem_index, MO_LEUQ);
4428                             gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
4429                             break;
4430                         case 3:
4431                         default:
4432                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4433                                                 s->mem_index, MO_LESW);
4434                             gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
4435                             break;
4436                         }
4437 
4438                         gen_helper_fp_arith_ST0_FT0(op1);
4439                         if (op1 == 3) {
4440                             /* fcomp needs pop */
4441                             gen_helper_fpop(tcg_env);
4442                         }
4443                     }
4444                     break;
4445                 case 0x08: /* flds */
4446                 case 0x0a: /* fsts */
4447                 case 0x0b: /* fstps */
4448                 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
4449                 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
4450                 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
4451                     switch (op & 7) {
4452                     case 0:
4453                         switch (op >> 4) {
4454                         case 0:
4455                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4456                                                 s->mem_index, MO_LEUL);
4457                             gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
4458                             break;
4459                         case 1:
4460                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4461                                                 s->mem_index, MO_LEUL);
4462                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4463                             break;
4464                         case 2:
4465                             tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4466                                                 s->mem_index, MO_LEUQ);
4467                             gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
4468                             break;
4469                         case 3:
4470                         default:
4471                             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4472                                                 s->mem_index, MO_LESW);
4473                             gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
4474                             break;
4475                         }
4476                         break;
4477                     case 1:
4478                         /* XXX: the corresponding CPUID bit must be tested ! */
4479                         switch (op >> 4) {
4480                         case 1:
4481                             gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
4482                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4483                                                 s->mem_index, MO_LEUL);
4484                             break;
4485                         case 2:
4486                             gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
4487                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4488                                                 s->mem_index, MO_LEUQ);
4489                             break;
4490                         case 3:
4491                         default:
4492                             gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
4493                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4494                                                 s->mem_index, MO_LEUW);
4495                             break;
4496                         }
4497                         gen_helper_fpop(tcg_env);
4498                         break;
4499                     default:
4500                         switch (op >> 4) {
4501                         case 0:
4502                             gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
4503                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4504                                                 s->mem_index, MO_LEUL);
4505                             break;
4506                         case 1:
4507                             gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
4508                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4509                                                 s->mem_index, MO_LEUL);
4510                             break;
4511                         case 2:
4512                             gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
4513                             tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4514                                                 s->mem_index, MO_LEUQ);
4515                             break;
4516                         case 3:
4517                         default:
4518                             gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
4519                             tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4520                                                 s->mem_index, MO_LEUW);
4521                             break;
4522                         }
4523                         if ((op & 7) == 3) {
4524                             gen_helper_fpop(tcg_env);
4525                         }
4526                         break;
4527                     }
4528                     break;
4529                 case 0x0c: /* fldenv mem */
4530                     gen_helper_fldenv(tcg_env, s->A0,
4531                                       tcg_constant_i32(dflag - 1));
4532                     update_fip = update_fdp = false;
4533                     break;
4534                 case 0x0d: /* fldcw mem */
4535                     tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
4536                                         s->mem_index, MO_LEUW);
4537                     gen_helper_fldcw(tcg_env, s->tmp2_i32);
4538                     update_fip = update_fdp = false;
4539                     break;
4540                 case 0x0e: /* fnstenv mem */
4541                     gen_helper_fstenv(tcg_env, s->A0,
4542                                       tcg_constant_i32(dflag - 1));
4543                     update_fip = update_fdp = false;
4544                     break;
4545                 case 0x0f: /* fnstcw mem */
4546                     gen_helper_fnstcw(s->tmp2_i32, tcg_env);
4547                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4548                                         s->mem_index, MO_LEUW);
4549                     update_fip = update_fdp = false;
4550                     break;
4551                 case 0x1d: /* fldt mem */
4552                     gen_helper_fldt_ST0(tcg_env, s->A0);
4553                     break;
4554                 case 0x1f: /* fstpt mem */
4555                     gen_helper_fstt_ST0(tcg_env, s->A0);
4556                     gen_helper_fpop(tcg_env);
4557                     break;
4558                 case 0x2c: /* frstor mem */
4559                     gen_helper_frstor(tcg_env, s->A0,
4560                                       tcg_constant_i32(dflag - 1));
4561                     update_fip = update_fdp = false;
4562                     break;
4563                 case 0x2e: /* fnsave mem */
4564                     gen_helper_fsave(tcg_env, s->A0,
4565                                      tcg_constant_i32(dflag - 1));
4566                     update_fip = update_fdp = false;
4567                     break;
4568                 case 0x2f: /* fnstsw mem */
4569                     gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4570                     tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
4571                                         s->mem_index, MO_LEUW);
4572                     update_fip = update_fdp = false;
4573                     break;
4574                 case 0x3c: /* fbld */
4575                     gen_helper_fbld_ST0(tcg_env, s->A0);
4576                     break;
4577                 case 0x3e: /* fbstp */
4578                     gen_helper_fbst_ST0(tcg_env, s->A0);
4579                     gen_helper_fpop(tcg_env);
4580                     break;
4581                 case 0x3d: /* fildll */
4582                     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
4583                                         s->mem_index, MO_LEUQ);
4584                     gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
4585                     break;
4586                 case 0x3f: /* fistpll */
4587                     gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
4588                     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
4589                                         s->mem_index, MO_LEUQ);
4590                     gen_helper_fpop(tcg_env);
4591                     break;
4592                 default:
4593                     goto unknown_op;
4594                 }
4595 
4596                 if (update_fdp) {
4597                     int last_seg = s->override >= 0 ? s->override : a.def_seg;
4598 
4599                     tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4600                                    offsetof(CPUX86State,
4601                                             segs[last_seg].selector));
4602                     tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4603                                      offsetof(CPUX86State, fpds));
4604                     tcg_gen_st_tl(last_addr, tcg_env,
4605                                   offsetof(CPUX86State, fpdp));
4606                 }
4607             } else {
4608                 /* register float ops */
4609                 opreg = rm;
4610 
4611                 switch (op) {
4612                 case 0x08: /* fld sti */
4613                     gen_helper_fpush(tcg_env);
4614                     gen_helper_fmov_ST0_STN(tcg_env,
4615                                             tcg_constant_i32((opreg + 1) & 7));
4616                     break;
4617                 case 0x09: /* fxchg sti */
4618                 case 0x29: /* fxchg4 sti, undocumented op */
4619                 case 0x39: /* fxchg7 sti, undocumented op */
4620                     gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
4621                     break;
4622                 case 0x0a: /* grp d9/2 */
4623                     switch (rm) {
4624                     case 0: /* fnop */
4625                         /*
4626                          * check exceptions (FreeBSD FPU probe)
4627                          * needs to be treated as I/O because of ferr_irq
4628                          */
4629                         translator_io_start(&s->base);
4630                         gen_helper_fwait(tcg_env);
4631                         update_fip = false;
4632                         break;
4633                     default:
4634                         goto unknown_op;
4635                     }
4636                     break;
4637                 case 0x0c: /* grp d9/4 */
4638                     switch (rm) {
4639                     case 0: /* fchs */
4640                         gen_helper_fchs_ST0(tcg_env);
4641                         break;
4642                     case 1: /* fabs */
4643                         gen_helper_fabs_ST0(tcg_env);
4644                         break;
4645                     case 4: /* ftst */
4646                         gen_helper_fldz_FT0(tcg_env);
4647                         gen_helper_fcom_ST0_FT0(tcg_env);
4648                         break;
4649                     case 5: /* fxam */
4650                         gen_helper_fxam_ST0(tcg_env);
4651                         break;
4652                     default:
4653                         goto unknown_op;
4654                     }
4655                     break;
4656                 case 0x0d: /* grp d9/5 */
4657                     {
4658                         switch (rm) {
4659                         case 0:
4660                             gen_helper_fpush(tcg_env);
4661                             gen_helper_fld1_ST0(tcg_env);
4662                             break;
4663                         case 1:
4664                             gen_helper_fpush(tcg_env);
4665                             gen_helper_fldl2t_ST0(tcg_env);
4666                             break;
4667                         case 2:
4668                             gen_helper_fpush(tcg_env);
4669                             gen_helper_fldl2e_ST0(tcg_env);
4670                             break;
4671                         case 3:
4672                             gen_helper_fpush(tcg_env);
4673                             gen_helper_fldpi_ST0(tcg_env);
4674                             break;
4675                         case 4:
4676                             gen_helper_fpush(tcg_env);
4677                             gen_helper_fldlg2_ST0(tcg_env);
4678                             break;
4679                         case 5:
4680                             gen_helper_fpush(tcg_env);
4681                             gen_helper_fldln2_ST0(tcg_env);
4682                             break;
4683                         case 6:
4684                             gen_helper_fpush(tcg_env);
4685                             gen_helper_fldz_ST0(tcg_env);
4686                             break;
4687                         default:
4688                             goto unknown_op;
4689                         }
4690                     }
4691                     break;
4692                 case 0x0e: /* grp d9/6 */
4693                     switch (rm) {
4694                     case 0: /* f2xm1 */
4695                         gen_helper_f2xm1(tcg_env);
4696                         break;
4697                     case 1: /* fyl2x */
4698                         gen_helper_fyl2x(tcg_env);
4699                         break;
4700                     case 2: /* fptan */
4701                         gen_helper_fptan(tcg_env);
4702                         break;
4703                     case 3: /* fpatan */
4704                         gen_helper_fpatan(tcg_env);
4705                         break;
4706                     case 4: /* fxtract */
4707                         gen_helper_fxtract(tcg_env);
4708                         break;
4709                     case 5: /* fprem1 */
4710                         gen_helper_fprem1(tcg_env);
4711                         break;
4712                     case 6: /* fdecstp */
4713                         gen_helper_fdecstp(tcg_env);
4714                         break;
4715                     default:
4716                     case 7: /* fincstp */
4717                         gen_helper_fincstp(tcg_env);
4718                         break;
4719                     }
4720                     break;
4721                 case 0x0f: /* grp d9/7 */
4722                     switch (rm) {
4723                     case 0: /* fprem */
4724                         gen_helper_fprem(tcg_env);
4725                         break;
4726                     case 1: /* fyl2xp1 */
4727                         gen_helper_fyl2xp1(tcg_env);
4728                         break;
4729                     case 2: /* fsqrt */
4730                         gen_helper_fsqrt(tcg_env);
4731                         break;
4732                     case 3: /* fsincos */
4733                         gen_helper_fsincos(tcg_env);
4734                         break;
4735                     case 5: /* fscale */
4736                         gen_helper_fscale(tcg_env);
4737                         break;
4738                     case 4: /* frndint */
4739                         gen_helper_frndint(tcg_env);
4740                         break;
4741                     case 6: /* fsin */
4742                         gen_helper_fsin(tcg_env);
4743                         break;
4744                     default:
4745                     case 7: /* fcos */
4746                         gen_helper_fcos(tcg_env);
4747                         break;
4748                     }
4749                     break;
4750                 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
4751                 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
4752                 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
4753                     {
4754                         int op1;
4755 
4756                         op1 = op & 7;
4757                         if (op >= 0x20) {
4758                             gen_helper_fp_arith_STN_ST0(op1, opreg);
4759                             if (op >= 0x30) {
4760                                 gen_helper_fpop(tcg_env);
4761                             }
4762                         } else {
4763                             gen_helper_fmov_FT0_STN(tcg_env,
4764                                                     tcg_constant_i32(opreg));
4765                             gen_helper_fp_arith_ST0_FT0(op1);
4766                         }
4767                     }
4768                     break;
4769                 case 0x02: /* fcom */
4770                 case 0x22: /* fcom2, undocumented op */
4771                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4772                     gen_helper_fcom_ST0_FT0(tcg_env);
4773                     break;
4774                 case 0x03: /* fcomp */
4775                 case 0x23: /* fcomp3, undocumented op */
4776                 case 0x32: /* fcomp5, undocumented op */
4777                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4778                     gen_helper_fcom_ST0_FT0(tcg_env);
4779                     gen_helper_fpop(tcg_env);
4780                     break;
4781                 case 0x15: /* da/5 */
4782                     switch (rm) {
4783                     case 1: /* fucompp */
4784                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4785                         gen_helper_fucom_ST0_FT0(tcg_env);
4786                         gen_helper_fpop(tcg_env);
4787                         gen_helper_fpop(tcg_env);
4788                         break;
4789                     default:
4790                         goto unknown_op;
4791                     }
4792                     break;
4793                 case 0x1c:
4794                     switch (rm) {
4795                     case 0: /* feni (287 only, just do nop here) */
4796                         break;
4797                     case 1: /* fdisi (287 only, just do nop here) */
4798                         break;
4799                     case 2: /* fclex */
4800                         gen_helper_fclex(tcg_env);
4801                         update_fip = false;
4802                         break;
4803                     case 3: /* fninit */
4804                         gen_helper_fninit(tcg_env);
4805                         update_fip = false;
4806                         break;
4807                     case 4: /* fsetpm (287 only, just do nop here) */
4808                         break;
4809                     default:
4810                         goto unknown_op;
4811                     }
4812                     break;
4813                 case 0x1d: /* fucomi */
4814                     if (!(s->cpuid_features & CPUID_CMOV)) {
4815                         goto illegal_op;
4816                     }
4817                     gen_update_cc_op(s);
4818                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4819                     gen_helper_fucomi_ST0_FT0(tcg_env);
4820                     set_cc_op(s, CC_OP_EFLAGS);
4821                     break;
4822                 case 0x1e: /* fcomi */
4823                     if (!(s->cpuid_features & CPUID_CMOV)) {
4824                         goto illegal_op;
4825                     }
4826                     gen_update_cc_op(s);
4827                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4828                     gen_helper_fcomi_ST0_FT0(tcg_env);
4829                     set_cc_op(s, CC_OP_EFLAGS);
4830                     break;
4831                 case 0x28: /* ffree sti */
4832                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4833                     break;
4834                 case 0x2a: /* fst sti */
4835                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4836                     break;
4837                 case 0x2b: /* fstp sti */
4838                 case 0x0b: /* fstp1 sti, undocumented op */
4839                 case 0x3a: /* fstp8 sti, undocumented op */
4840                 case 0x3b: /* fstp9 sti, undocumented op */
4841                     gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
4842                     gen_helper_fpop(tcg_env);
4843                     break;
4844                 case 0x2c: /* fucom st(i) */
4845                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4846                     gen_helper_fucom_ST0_FT0(tcg_env);
4847                     break;
4848                 case 0x2d: /* fucomp st(i) */
4849                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4850                     gen_helper_fucom_ST0_FT0(tcg_env);
4851                     gen_helper_fpop(tcg_env);
4852                     break;
4853                 case 0x33: /* de/3 */
4854                     switch (rm) {
4855                     case 1: /* fcompp */
4856                         gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
4857                         gen_helper_fcom_ST0_FT0(tcg_env);
4858                         gen_helper_fpop(tcg_env);
4859                         gen_helper_fpop(tcg_env);
4860                         break;
4861                     default:
4862                         goto unknown_op;
4863                     }
4864                     break;
4865                 case 0x38: /* ffreep sti, undocumented op */
4866                     gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
4867                     gen_helper_fpop(tcg_env);
4868                     break;
4869                 case 0x3c: /* df/4 */
4870                     switch (rm) {
4871                     case 0:
4872                         gen_helper_fnstsw(s->tmp2_i32, tcg_env);
4873                         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
4874                         gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
4875                         break;
4876                     default:
4877                         goto unknown_op;
4878                     }
4879                     break;
4880                 case 0x3d: /* fucomip */
4881                     if (!(s->cpuid_features & CPUID_CMOV)) {
4882                         goto illegal_op;
4883                     }
4884                     gen_update_cc_op(s);
4885                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4886                     gen_helper_fucomi_ST0_FT0(tcg_env);
4887                     gen_helper_fpop(tcg_env);
4888                     set_cc_op(s, CC_OP_EFLAGS);
4889                     break;
4890                 case 0x3e: /* fcomip */
4891                     if (!(s->cpuid_features & CPUID_CMOV)) {
4892                         goto illegal_op;
4893                     }
4894                     gen_update_cc_op(s);
4895                     gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
4896                     gen_helper_fcomi_ST0_FT0(tcg_env);
4897                     gen_helper_fpop(tcg_env);
4898                     set_cc_op(s, CC_OP_EFLAGS);
4899                     break;
4900                 case 0x10 ... 0x13: /* fcmovxx */
4901                 case 0x18 ... 0x1b:
4902                     {
4903                         int op1;
4904                         TCGLabel *l1;
4905                         static const uint8_t fcmov_cc[8] = {
4906                             (JCC_B << 1),
4907                             (JCC_Z << 1),
4908                             (JCC_BE << 1),
4909                             (JCC_P << 1),
4910                         };
4911 
4912                         if (!(s->cpuid_features & CPUID_CMOV)) {
4913                             goto illegal_op;
4914                         }
4915                         op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
4916                         l1 = gen_new_label();
4917                         gen_jcc1_noeob(s, op1, l1);
4918                         gen_helper_fmov_ST0_STN(tcg_env,
4919                                                 tcg_constant_i32(opreg));
4920                         gen_set_label(l1);
4921                     }
4922                     break;
4923                 default:
4924                     goto unknown_op;
4925                 }
4926             }
4927 
4928             if (update_fip) {
4929                 tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
4930                                offsetof(CPUX86State, segs[R_CS].selector));
4931                 tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
4932                                  offsetof(CPUX86State, fpcs));
4933                 tcg_gen_st_tl(eip_cur_tl(s),
4934                               tcg_env, offsetof(CPUX86State, fpip));
4935             }
4936         }
4937         break;
4938         /************************/
4939         /* string ops */
4940 
4941     case 0xa4: /* movsS */
4942     case 0xa5:
4943         ot = mo_b_d(b, dflag);
4944         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4945             gen_repz_movs(s, ot);
4946         } else {
4947             gen_movs(s, ot);
4948         }
4949         break;
4950 
4951     case 0xaa: /* stosS */
4952     case 0xab:
4953         ot = mo_b_d(b, dflag);
4954         gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
4955         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4956             gen_repz_stos(s, ot);
4957         } else {
4958             gen_stos(s, ot);
4959         }
4960         break;
4961     case 0xac: /* lodsS */
4962     case 0xad:
4963         ot = mo_b_d(b, dflag);
4964         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
4965             gen_repz_lods(s, ot);
4966         } else {
4967             gen_lods(s, ot);
4968         }
4969         break;
4970     case 0xae: /* scasS */
4971     case 0xaf:
4972         ot = mo_b_d(b, dflag);
4973         gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
4974         if (prefixes & PREFIX_REPNZ) {
4975             gen_repz_scas(s, ot, 1);
4976         } else if (prefixes & PREFIX_REPZ) {
4977             gen_repz_scas(s, ot, 0);
4978         } else {
4979             gen_scas(s, ot);
4980         }
4981         break;
4982 
4983     case 0xa6: /* cmpsS */
4984     case 0xa7:
4985         ot = mo_b_d(b, dflag);
4986         if (prefixes & PREFIX_REPNZ) {
4987             gen_repz_cmps(s, ot, 1);
4988         } else if (prefixes & PREFIX_REPZ) {
4989             gen_repz_cmps(s, ot, 0);
4990         } else {
4991             gen_cmps(s, ot);
4992         }
4993         break;
4994     case 0x6c: /* insS */
4995     case 0x6d:
4996         ot = mo_b_d32(b, dflag);
4997         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
4998         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
4999         if (!gen_check_io(s, ot, s->tmp2_i32,
5000                           SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
5001             break;
5002         }
5003         translator_io_start(&s->base);
5004         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5005             gen_repz_ins(s, ot);
5006         } else {
5007             gen_ins(s, ot);
5008         }
5009         break;
5010     case 0x6e: /* outsS */
5011     case 0x6f:
5012         ot = mo_b_d32(b, dflag);
5013         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5014         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5015         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
5016             break;
5017         }
5018         translator_io_start(&s->base);
5019         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5020             gen_repz_outs(s, ot);
5021         } else {
5022             gen_outs(s, ot);
5023         }
5024         break;
5025 
5026         /************************/
5027         /* port I/O */
5028 
5029     case 0xe4:
5030     case 0xe5:
5031         ot = mo_b_d32(b, dflag);
5032         val = x86_ldub_code(env, s);
5033         tcg_gen_movi_i32(s->tmp2_i32, val);
5034         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5035             break;
5036         }
5037         translator_io_start(&s->base);
5038         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5039         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5040         gen_bpt_io(s, s->tmp2_i32, ot);
5041         break;
5042     case 0xe6:
5043     case 0xe7:
5044         ot = mo_b_d32(b, dflag);
5045         val = x86_ldub_code(env, s);
5046         tcg_gen_movi_i32(s->tmp2_i32, val);
5047         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5048             break;
5049         }
5050         translator_io_start(&s->base);
5051         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5052         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5053         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5054         gen_bpt_io(s, s->tmp2_i32, ot);
5055         break;
5056     case 0xec:
5057     case 0xed:
5058         ot = mo_b_d32(b, dflag);
5059         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5060         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5061         if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
5062             break;
5063         }
5064         translator_io_start(&s->base);
5065         gen_helper_in_func(ot, s->T1, s->tmp2_i32);
5066         gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
5067         gen_bpt_io(s, s->tmp2_i32, ot);
5068         break;
5069     case 0xee:
5070     case 0xef:
5071         ot = mo_b_d32(b, dflag);
5072         tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
5073         tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
5074         if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
5075             break;
5076         }
5077         translator_io_start(&s->base);
5078         gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
5079         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
5080         gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
5081         gen_bpt_io(s, s->tmp2_i32, ot);
5082         break;
5083 
5084         /************************/
5085         /* control */
5086     case 0xc2: /* ret im */
5087         val = x86_ldsw_code(env, s);
5088         ot = gen_pop_T0(s);
5089         gen_stack_update(s, val + (1 << ot));
5090         /* Note that gen_pop_T0 uses a zero-extending load.  */
5091         gen_op_jmp_v(s, s->T0);
5092         gen_bnd_jmp(s);
5093         s->base.is_jmp = DISAS_JUMP;
5094         break;
5095     case 0xc3: /* ret */
5096         ot = gen_pop_T0(s);
5097         gen_pop_update(s, ot);
5098         /* Note that gen_pop_T0 uses a zero-extending load.  */
5099         gen_op_jmp_v(s, s->T0);
5100         gen_bnd_jmp(s);
5101         s->base.is_jmp = DISAS_JUMP;
5102         break;
5103     case 0xca: /* lret im */
5104         val = x86_ldsw_code(env, s);
5105     do_lret:
5106         if (PE(s) && !VM86(s)) {
5107             gen_update_cc_op(s);
5108             gen_update_eip_cur(s);
5109             gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5110                                       tcg_constant_i32(val));
5111         } else {
5112             gen_stack_A0(s);
5113             /* pop offset */
5114             gen_op_ld_v(s, dflag, s->T0, s->A0);
5115             /* NOTE: keeping EIP updated is not a problem in case of
5116                exception */
5117             gen_op_jmp_v(s, s->T0);
5118             /* pop selector */
5119             gen_add_A0_im(s, 1 << dflag);
5120             gen_op_ld_v(s, dflag, s->T0, s->A0);
5121             gen_op_movl_seg_T0_vm(s, R_CS);
5122             /* add stack offset */
5123             gen_stack_update(s, val + (2 << dflag));
5124         }
5125         s->base.is_jmp = DISAS_EOB_ONLY;
5126         break;
5127     case 0xcb: /* lret */
5128         val = 0;
5129         goto do_lret;
5130     case 0xcf: /* iret */
5131         gen_svm_check_intercept(s, SVM_EXIT_IRET);
5132         if (!PE(s) || VM86(s)) {
5133             /* real mode or vm86 mode */
5134             if (!check_vm86_iopl(s)) {
5135                 break;
5136             }
5137             gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
5138         } else {
5139             gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
5140                                       eip_next_i32(s));
5141         }
5142         set_cc_op(s, CC_OP_EFLAGS);
5143         s->base.is_jmp = DISAS_EOB_ONLY;
5144         break;
5145     case 0xe8: /* call im */
5146         {
5147             int diff = (dflag != MO_16
5148                         ? (int32_t)insn_get(env, s, MO_32)
5149                         : (int16_t)insn_get(env, s, MO_16));
5150             gen_push_v(s, eip_next_tl(s));
5151             gen_bnd_jmp(s);
5152             gen_jmp_rel(s, dflag, diff, 0);
5153         }
5154         break;
5155     case 0x9a: /* lcall im */
5156         {
5157             unsigned int selector, offset;
5158 
5159             if (CODE64(s))
5160                 goto illegal_op;
5161             ot = dflag;
5162             offset = insn_get(env, s, ot);
5163             selector = insn_get(env, s, MO_16);
5164 
5165             tcg_gen_movi_tl(s->T0, selector);
5166             tcg_gen_movi_tl(s->T1, offset);
5167         }
5168         goto do_lcall;
5169     case 0xe9: /* jmp im */
5170         {
5171             int diff = (dflag != MO_16
5172                         ? (int32_t)insn_get(env, s, MO_32)
5173                         : (int16_t)insn_get(env, s, MO_16));
5174             gen_bnd_jmp(s);
5175             gen_jmp_rel(s, dflag, diff, 0);
5176         }
5177         break;
5178     case 0xea: /* ljmp im */
5179         {
5180             unsigned int selector, offset;
5181 
5182             if (CODE64(s))
5183                 goto illegal_op;
5184             ot = dflag;
5185             offset = insn_get(env, s, ot);
5186             selector = insn_get(env, s, MO_16);
5187 
5188             tcg_gen_movi_tl(s->T0, selector);
5189             tcg_gen_movi_tl(s->T1, offset);
5190         }
5191         goto do_ljmp;
5192     case 0xeb: /* jmp Jb */
5193         {
5194             int diff = (int8_t)insn_get(env, s, MO_8);
5195             gen_jmp_rel(s, dflag, diff, 0);
5196         }
5197         break;
5198     case 0x70 ... 0x7f: /* jcc Jb */
5199         {
5200             int diff = (int8_t)insn_get(env, s, MO_8);
5201             gen_bnd_jmp(s);
5202             gen_jcc(s, b, diff);
5203         }
5204         break;
5205     case 0x180 ... 0x18f: /* jcc Jv */
5206         {
5207             int diff = (dflag != MO_16
5208                         ? (int32_t)insn_get(env, s, MO_32)
5209                         : (int16_t)insn_get(env, s, MO_16));
5210             gen_bnd_jmp(s);
5211             gen_jcc(s, b, diff);
5212         }
5213         break;
5214 
5215     case 0x190 ... 0x19f: /* setcc Gv */
5216         modrm = x86_ldub_code(env, s);
5217         gen_setcc1(s, b, s->T0);
5218         gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
5219         break;
5220     case 0x140 ... 0x14f: /* cmov Gv, Ev */
5221         if (!(s->cpuid_features & CPUID_CMOV)) {
5222             goto illegal_op;
5223         }
5224         ot = dflag;
5225         modrm = x86_ldub_code(env, s);
5226         reg = ((modrm >> 3) & 7) | REX_R(s);
5227         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5228         gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]);
5229         gen_op_mov_reg_v(s, ot, reg, s->T0);
5230         break;
5231 
5232         /************************/
5233         /* flags */
5234     case 0x9c: /* pushf */
5235         gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
5236         if (check_vm86_iopl(s)) {
5237             gen_update_cc_op(s);
5238             gen_helper_read_eflags(s->T0, tcg_env);
5239             gen_push_v(s, s->T0);
5240         }
5241         break;
5242     case 0x9d: /* popf */
5243         gen_svm_check_intercept(s, SVM_EXIT_POPF);
5244         if (check_vm86_iopl(s)) {
5245             int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
5246 
5247             if (CPL(s) == 0) {
5248                 mask |= IF_MASK | IOPL_MASK;
5249             } else if (CPL(s) <= IOPL(s)) {
5250                 mask |= IF_MASK;
5251             }
5252             if (dflag == MO_16) {
5253                 mask &= 0xffff;
5254             }
5255 
5256             ot = gen_pop_T0(s);
5257             gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
5258             gen_pop_update(s, ot);
5259             set_cc_op(s, CC_OP_EFLAGS);
5260             /* abort translation because TF/AC flag may change */
5261             s->base.is_jmp = DISAS_EOB_NEXT;
5262         }
5263         break;
5264     case 0x9e: /* sahf */
5265         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5266             goto illegal_op;
5267         tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
5268         gen_compute_eflags(s);
5269         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
5270         tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
5271         tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
5272         break;
5273     case 0x9f: /* lahf */
5274         if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
5275             goto illegal_op;
5276         gen_compute_eflags(s);
5277         /* Note: gen_compute_eflags() only gives the condition codes */
5278         tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
5279         tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
5280         break;
5281     case 0xf5: /* cmc */
5282         gen_compute_eflags(s);
5283         tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5284         break;
5285     case 0xf8: /* clc */
5286         gen_compute_eflags(s);
5287         tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
5288         break;
5289     case 0xf9: /* stc */
5290         gen_compute_eflags(s);
5291         tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
5292         break;
5293     case 0xfc: /* cld */
5294         tcg_gen_movi_i32(s->tmp2_i32, 1);
5295         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5296         break;
5297     case 0xfd: /* std */
5298         tcg_gen_movi_i32(s->tmp2_i32, -1);
5299         tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
5300         break;
5301 
5302         /************************/
5303         /* bit operations */
5304     case 0x1ba: /* bt/bts/btr/btc Gv, im */
5305         ot = dflag;
5306         modrm = x86_ldub_code(env, s);
5307         op = (modrm >> 3) & 7;
5308         mod = (modrm >> 6) & 3;
5309         rm = (modrm & 7) | REX_B(s);
5310         if (mod != 3) {
5311             s->rip_offset = 1;
5312             gen_lea_modrm(env, s, modrm);
5313             if (!(s->prefix & PREFIX_LOCK)) {
5314                 gen_op_ld_v(s, ot, s->T0, s->A0);
5315             }
5316         } else {
5317             gen_op_mov_v_reg(s, ot, s->T0, rm);
5318         }
5319         /* load shift */
5320         val = x86_ldub_code(env, s);
5321         tcg_gen_movi_tl(s->T1, val);
5322         if (op < 4)
5323             goto unknown_op;
5324         op -= 4;
5325         goto bt_op;
5326     case 0x1a3: /* bt Gv, Ev */
5327         op = 0;
5328         goto do_btx;
5329     case 0x1ab: /* bts */
5330         op = 1;
5331         goto do_btx;
5332     case 0x1b3: /* btr */
5333         op = 2;
5334         goto do_btx;
5335     case 0x1bb: /* btc */
5336         op = 3;
5337     do_btx:
5338         ot = dflag;
5339         modrm = x86_ldub_code(env, s);
5340         reg = ((modrm >> 3) & 7) | REX_R(s);
5341         mod = (modrm >> 6) & 3;
5342         rm = (modrm & 7) | REX_B(s);
5343         gen_op_mov_v_reg(s, MO_32, s->T1, reg);
5344         if (mod != 3) {
5345             AddressParts a = gen_lea_modrm_0(env, s, modrm);
5346             /* specific case: we need to add a displacement */
5347             gen_exts(ot, s->T1);
5348             tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
5349             tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
5350             tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
5351             gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
5352             if (!(s->prefix & PREFIX_LOCK)) {
5353                 gen_op_ld_v(s, ot, s->T0, s->A0);
5354             }
5355         } else {
5356             gen_op_mov_v_reg(s, ot, s->T0, rm);
5357         }
5358     bt_op:
5359         tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
5360         tcg_gen_movi_tl(s->tmp0, 1);
5361         tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
5362         if (s->prefix & PREFIX_LOCK) {
5363             switch (op) {
5364             case 0: /* bt */
5365                 /* Needs no atomic ops; we suppressed the normal
5366                    memory load for LOCK above so do it now.  */
5367                 gen_op_ld_v(s, ot, s->T0, s->A0);
5368                 break;
5369             case 1: /* bts */
5370                 tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
5371                                            s->mem_index, ot | MO_LE);
5372                 break;
5373             case 2: /* btr */
5374                 tcg_gen_not_tl(s->tmp0, s->tmp0);
5375                 tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
5376                                             s->mem_index, ot | MO_LE);
5377                 break;
5378             default:
5379             case 3: /* btc */
5380                 tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
5381                                             s->mem_index, ot | MO_LE);
5382                 break;
5383             }
5384             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5385         } else {
5386             tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
5387             switch (op) {
5388             case 0: /* bt */
5389                 /* Data already loaded; nothing to do.  */
5390                 break;
5391             case 1: /* bts */
5392                 tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
5393                 break;
5394             case 2: /* btr */
5395                 tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
5396                 break;
5397             default:
5398             case 3: /* btc */
5399                 tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
5400                 break;
5401             }
5402             if (op != 0) {
5403                 if (mod != 3) {
5404                     gen_op_st_v(s, ot, s->T0, s->A0);
5405                 } else {
5406                     gen_op_mov_reg_v(s, ot, rm, s->T0);
5407                 }
5408             }
5409         }
5410 
5411         /* Delay all CC updates until after the store above.  Note that
5412            C is the result of the test, Z is unchanged, and the others
5413            are all undefined.  */
5414         switch (s->cc_op) {
5415         case CC_OP_MULB ... CC_OP_MULQ:
5416         case CC_OP_ADDB ... CC_OP_ADDQ:
5417         case CC_OP_ADCB ... CC_OP_ADCQ:
5418         case CC_OP_SUBB ... CC_OP_SUBQ:
5419         case CC_OP_SBBB ... CC_OP_SBBQ:
5420         case CC_OP_LOGICB ... CC_OP_LOGICQ:
5421         case CC_OP_INCB ... CC_OP_INCQ:
5422         case CC_OP_DECB ... CC_OP_DECQ:
5423         case CC_OP_SHLB ... CC_OP_SHLQ:
5424         case CC_OP_SARB ... CC_OP_SARQ:
5425         case CC_OP_BMILGB ... CC_OP_BMILGQ:
5426             /* Z was going to be computed from the non-zero status of CC_DST.
5427                We can get that same Z value (and the new C value) by leaving
5428                CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
5429                same width.  */
5430             tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
5431             set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
5432             break;
5433         default:
5434             /* Otherwise, generate EFLAGS and replace the C bit.  */
5435             gen_compute_eflags(s);
5436             tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
5437                                ctz32(CC_C), 1);
5438             break;
5439         }
5440         break;
5441     case 0x1bc: /* bsf / tzcnt */
5442     case 0x1bd: /* bsr / lzcnt */
5443         ot = dflag;
5444         modrm = x86_ldub_code(env, s);
5445         reg = ((modrm >> 3) & 7) | REX_R(s);
5446         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5447         gen_extu(ot, s->T0);
5448 
5449         /* Note that lzcnt and tzcnt are in different extensions.  */
5450         if ((prefixes & PREFIX_REPZ)
5451             && (b & 1
5452                 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
5453                 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
5454             int size = 8 << ot;
5455             /* For lzcnt/tzcnt, C bit is defined related to the input. */
5456             tcg_gen_mov_tl(cpu_cc_src, s->T0);
5457             if (b & 1) {
5458                 /* For lzcnt, reduce the target_ulong result by the
5459                    number of zeros that we expect to find at the top.  */
5460                 tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
5461                 tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
5462             } else {
5463                 /* For tzcnt, a zero input must return the operand size.  */
5464                 tcg_gen_ctzi_tl(s->T0, s->T0, size);
5465             }
5466             /* For lzcnt/tzcnt, Z bit is defined related to the result.  */
5467             gen_op_update1_cc(s);
5468             set_cc_op(s, CC_OP_BMILGB + ot);
5469         } else {
5470             /* For bsr/bsf, only the Z bit is defined and it is related
5471                to the input and not the result.  */
5472             tcg_gen_mov_tl(cpu_cc_dst, s->T0);
5473             set_cc_op(s, CC_OP_LOGICB + ot);
5474 
5475             /* ??? The manual says that the output is undefined when the
5476                input is zero, but real hardware leaves it unchanged, and
5477                real programs appear to depend on that.  Accomplish this
5478                by passing the output as the value to return upon zero.  */
5479             if (b & 1) {
5480                 /* For bsr, return the bit index of the first 1 bit,
5481                    not the count of leading zeros.  */
5482                 tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
5483                 tcg_gen_clz_tl(s->T0, s->T0, s->T1);
5484                 tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
5485             } else {
5486                 tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
5487             }
5488         }
5489         gen_op_mov_reg_v(s, ot, reg, s->T0);
5490         break;
5491         /************************/
5492         /* bcd */
5493     case 0x27: /* daa */
5494         if (CODE64(s))
5495             goto illegal_op;
5496         gen_update_cc_op(s);
5497         gen_helper_daa(tcg_env);
5498         set_cc_op(s, CC_OP_EFLAGS);
5499         break;
5500     case 0x2f: /* das */
5501         if (CODE64(s))
5502             goto illegal_op;
5503         gen_update_cc_op(s);
5504         gen_helper_das(tcg_env);
5505         set_cc_op(s, CC_OP_EFLAGS);
5506         break;
5507     case 0x37: /* aaa */
5508         if (CODE64(s))
5509             goto illegal_op;
5510         gen_update_cc_op(s);
5511         gen_helper_aaa(tcg_env);
5512         set_cc_op(s, CC_OP_EFLAGS);
5513         break;
5514     case 0x3f: /* aas */
5515         if (CODE64(s))
5516             goto illegal_op;
5517         gen_update_cc_op(s);
5518         gen_helper_aas(tcg_env);
5519         set_cc_op(s, CC_OP_EFLAGS);
5520         break;
5521     case 0xd4: /* aam */
5522         if (CODE64(s))
5523             goto illegal_op;
5524         val = x86_ldub_code(env, s);
5525         if (val == 0) {
5526             gen_exception(s, EXCP00_DIVZ);
5527         } else {
5528             gen_helper_aam(tcg_env, tcg_constant_i32(val));
5529             set_cc_op(s, CC_OP_LOGICB);
5530         }
5531         break;
5532     case 0xd5: /* aad */
5533         if (CODE64(s))
5534             goto illegal_op;
5535         val = x86_ldub_code(env, s);
5536         gen_helper_aad(tcg_env, tcg_constant_i32(val));
5537         set_cc_op(s, CC_OP_LOGICB);
5538         break;
5539         /************************/
5540         /* misc */
5541     case 0x90: /* nop */
5542         /* XXX: correct lock test for all insn */
5543         if (prefixes & PREFIX_LOCK) {
5544             goto illegal_op;
5545         }
5546         /* If REX_B is set, then this is xchg eax, r8d, not a nop.  */
5547         if (REX_B(s)) {
5548             goto do_xchg_reg_eax;
5549         }
5550         if (prefixes & PREFIX_REPZ) {
5551             gen_update_cc_op(s);
5552             gen_update_eip_cur(s);
5553             gen_helper_pause(tcg_env, cur_insn_len_i32(s));
5554             s->base.is_jmp = DISAS_NORETURN;
5555         }
5556         break;
5557     case 0x9b: /* fwait */
5558         if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
5559             (HF_MP_MASK | HF_TS_MASK)) {
5560             gen_exception(s, EXCP07_PREX);
5561         } else {
5562             /* needs to be treated as I/O because of ferr_irq */
5563             translator_io_start(&s->base);
5564             gen_helper_fwait(tcg_env);
5565         }
5566         break;
5567     case 0xcc: /* int3 */
5568         gen_interrupt(s, EXCP03_INT3);
5569         break;
5570     case 0xcd: /* int N */
5571         val = x86_ldub_code(env, s);
5572         if (check_vm86_iopl(s)) {
5573             gen_interrupt(s, val);
5574         }
5575         break;
5576     case 0xce: /* into */
5577         if (CODE64(s))
5578             goto illegal_op;
5579         gen_update_cc_op(s);
5580         gen_update_eip_cur(s);
5581         gen_helper_into(tcg_env, cur_insn_len_i32(s));
5582         break;
5583 #ifdef WANT_ICEBP
5584     case 0xf1: /* icebp (undocumented, exits to external debugger) */
5585         gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
5586         gen_debug(s);
5587         break;
5588 #endif
5589     case 0xfa: /* cli */
5590         if (check_iopl(s)) {
5591             gen_reset_eflags(s, IF_MASK);
5592         }
5593         break;
5594     case 0xfb: /* sti */
5595         if (check_iopl(s)) {
5596             gen_set_eflags(s, IF_MASK);
5597             /* interruptions are enabled only the first insn after sti */
5598             gen_update_eip_next(s);
5599             gen_eob_inhibit_irq(s, true);
5600         }
5601         break;
5602     case 0x62: /* bound */
5603         if (CODE64(s))
5604             goto illegal_op;
5605         ot = dflag;
5606         modrm = x86_ldub_code(env, s);
5607         reg = (modrm >> 3) & 7;
5608         mod = (modrm >> 6) & 3;
5609         if (mod == 3)
5610             goto illegal_op;
5611         gen_op_mov_v_reg(s, ot, s->T0, reg);
5612         gen_lea_modrm(env, s, modrm);
5613         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5614         if (ot == MO_16) {
5615             gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
5616         } else {
5617             gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
5618         }
5619         break;
5620     case 0x1c8 ... 0x1cf: /* bswap reg */
5621         reg = (b & 7) | REX_B(s);
5622 #ifdef TARGET_X86_64
5623         if (dflag == MO_64) {
5624             tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
5625             break;
5626         }
5627 #endif
5628         tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
5629         break;
5630     case 0xd6: /* salc */
5631         if (CODE64(s))
5632             goto illegal_op;
5633         gen_compute_eflags_c(s, s->T0);
5634         tcg_gen_neg_tl(s->T0, s->T0);
5635         gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
5636         break;
5637     case 0xe0: /* loopnz */
5638     case 0xe1: /* loopz */
5639     case 0xe2: /* loop */
5640     case 0xe3: /* jecxz */
5641         {
5642             TCGLabel *l1, *l2;
5643             int diff = (int8_t)insn_get(env, s, MO_8);
5644 
5645             l1 = gen_new_label();
5646             l2 = gen_new_label();
5647             gen_update_cc_op(s);
5648             b &= 3;
5649             switch(b) {
5650             case 0: /* loopnz */
5651             case 1: /* loopz */
5652                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5653                 gen_op_jz_ecx(s, l2);
5654                 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
5655                 break;
5656             case 2: /* loop */
5657                 gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
5658                 gen_op_jnz_ecx(s, l1);
5659                 break;
5660             default:
5661             case 3: /* jcxz */
5662                 gen_op_jz_ecx(s, l1);
5663                 break;
5664             }
5665 
5666             gen_set_label(l2);
5667             gen_jmp_rel_csize(s, 0, 1);
5668 
5669             gen_set_label(l1);
5670             gen_jmp_rel(s, dflag, diff, 0);
5671         }
5672         break;
5673     case 0x130: /* wrmsr */
5674     case 0x132: /* rdmsr */
5675         if (check_cpl0(s)) {
5676             gen_update_cc_op(s);
5677             gen_update_eip_cur(s);
5678             if (b & 2) {
5679                 gen_helper_rdmsr(tcg_env);
5680             } else {
5681                 gen_helper_wrmsr(tcg_env);
5682                 s->base.is_jmp = DISAS_EOB_NEXT;
5683             }
5684         }
5685         break;
5686     case 0x131: /* rdtsc */
5687         gen_update_cc_op(s);
5688         gen_update_eip_cur(s);
5689         translator_io_start(&s->base);
5690         gen_helper_rdtsc(tcg_env);
5691         break;
5692     case 0x133: /* rdpmc */
5693         gen_update_cc_op(s);
5694         gen_update_eip_cur(s);
5695         gen_helper_rdpmc(tcg_env);
5696         s->base.is_jmp = DISAS_NORETURN;
5697         break;
5698     case 0x134: /* sysenter */
5699         /* For AMD SYSENTER is not valid in long mode */
5700         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5701             goto illegal_op;
5702         }
5703         if (!PE(s)) {
5704             gen_exception_gpf(s);
5705         } else {
5706             gen_helper_sysenter(tcg_env);
5707             s->base.is_jmp = DISAS_EOB_ONLY;
5708         }
5709         break;
5710     case 0x135: /* sysexit */
5711         /* For AMD SYSEXIT is not valid in long mode */
5712         if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
5713             goto illegal_op;
5714         }
5715         if (!PE(s) || CPL(s) != 0) {
5716             gen_exception_gpf(s);
5717         } else {
5718             gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
5719             s->base.is_jmp = DISAS_EOB_ONLY;
5720         }
5721         break;
5722     case 0x105: /* syscall */
5723         /* For Intel SYSCALL is only valid in long mode */
5724         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5725             goto illegal_op;
5726         }
5727         gen_update_cc_op(s);
5728         gen_update_eip_cur(s);
5729         gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
5730         /* TF handling for the syscall insn is different. The TF bit is  checked
5731            after the syscall insn completes. This allows #DB to not be
5732            generated after one has entered CPL0 if TF is set in FMASK.  */
5733         gen_eob_worker(s, false, true);
5734         break;
5735     case 0x107: /* sysret */
5736         /* For Intel SYSRET is only valid in long mode */
5737         if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
5738             goto illegal_op;
5739         }
5740         if (!PE(s) || CPL(s) != 0) {
5741             gen_exception_gpf(s);
5742         } else {
5743             gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
5744             /* condition codes are modified only in long mode */
5745             if (LMA(s)) {
5746                 set_cc_op(s, CC_OP_EFLAGS);
5747             }
5748             /* TF handling for the sysret insn is different. The TF bit is
5749                checked after the sysret insn completes. This allows #DB to be
5750                generated "as if" the syscall insn in userspace has just
5751                completed.  */
5752             gen_eob_worker(s, false, true);
5753         }
5754         break;
5755     case 0x1a2: /* cpuid */
5756         gen_update_cc_op(s);
5757         gen_update_eip_cur(s);
5758         gen_helper_cpuid(tcg_env);
5759         break;
5760     case 0xf4: /* hlt */
5761         if (check_cpl0(s)) {
5762             gen_update_cc_op(s);
5763             gen_update_eip_cur(s);
5764             gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
5765             s->base.is_jmp = DISAS_NORETURN;
5766         }
5767         break;
5768     case 0x100:
5769         modrm = x86_ldub_code(env, s);
5770         mod = (modrm >> 6) & 3;
5771         op = (modrm >> 3) & 7;
5772         switch(op) {
5773         case 0: /* sldt */
5774             if (!PE(s) || VM86(s))
5775                 goto illegal_op;
5776             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5777                 break;
5778             }
5779             gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
5780             tcg_gen_ld32u_tl(s->T0, tcg_env,
5781                              offsetof(CPUX86State, ldt.selector));
5782             ot = mod == 3 ? dflag : MO_16;
5783             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5784             break;
5785         case 2: /* lldt */
5786             if (!PE(s) || VM86(s))
5787                 goto illegal_op;
5788             if (check_cpl0(s)) {
5789                 gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
5790                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5791                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5792                 gen_helper_lldt(tcg_env, s->tmp2_i32);
5793             }
5794             break;
5795         case 1: /* str */
5796             if (!PE(s) || VM86(s))
5797                 goto illegal_op;
5798             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5799                 break;
5800             }
5801             gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
5802             tcg_gen_ld32u_tl(s->T0, tcg_env,
5803                              offsetof(CPUX86State, tr.selector));
5804             ot = mod == 3 ? dflag : MO_16;
5805             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5806             break;
5807         case 3: /* ltr */
5808             if (!PE(s) || VM86(s))
5809                 goto illegal_op;
5810             if (check_cpl0(s)) {
5811                 gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
5812                 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5813                 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
5814                 gen_helper_ltr(tcg_env, s->tmp2_i32);
5815             }
5816             break;
5817         case 4: /* verr */
5818         case 5: /* verw */
5819             if (!PE(s) || VM86(s))
5820                 goto illegal_op;
5821             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5822             gen_update_cc_op(s);
5823             if (op == 4) {
5824                 gen_helper_verr(tcg_env, s->T0);
5825             } else {
5826                 gen_helper_verw(tcg_env, s->T0);
5827             }
5828             set_cc_op(s, CC_OP_EFLAGS);
5829             break;
5830         default:
5831             goto unknown_op;
5832         }
5833         break;
5834 
5835     case 0x101:
5836         modrm = x86_ldub_code(env, s);
5837         switch (modrm) {
5838         CASE_MODRM_MEM_OP(0): /* sgdt */
5839             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5840                 break;
5841             }
5842             gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
5843             gen_lea_modrm(env, s, modrm);
5844             tcg_gen_ld32u_tl(s->T0,
5845                              tcg_env, offsetof(CPUX86State, gdt.limit));
5846             gen_op_st_v(s, MO_16, s->T0, s->A0);
5847             gen_add_A0_im(s, 2);
5848             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
5849             if (dflag == MO_16) {
5850                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5851             }
5852             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5853             break;
5854 
5855         case 0xc8: /* monitor */
5856             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5857                 goto illegal_op;
5858             }
5859             gen_update_cc_op(s);
5860             gen_update_eip_cur(s);
5861             tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
5862             gen_add_A0_ds_seg(s);
5863             gen_helper_monitor(tcg_env, s->A0);
5864             break;
5865 
5866         case 0xc9: /* mwait */
5867             if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
5868                 goto illegal_op;
5869             }
5870             gen_update_cc_op(s);
5871             gen_update_eip_cur(s);
5872             gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
5873             s->base.is_jmp = DISAS_NORETURN;
5874             break;
5875 
5876         case 0xca: /* clac */
5877             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5878                 || CPL(s) != 0) {
5879                 goto illegal_op;
5880             }
5881             gen_reset_eflags(s, AC_MASK);
5882             s->base.is_jmp = DISAS_EOB_NEXT;
5883             break;
5884 
5885         case 0xcb: /* stac */
5886             if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
5887                 || CPL(s) != 0) {
5888                 goto illegal_op;
5889             }
5890             gen_set_eflags(s, AC_MASK);
5891             s->base.is_jmp = DISAS_EOB_NEXT;
5892             break;
5893 
5894         CASE_MODRM_MEM_OP(1): /* sidt */
5895             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
5896                 break;
5897             }
5898             gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
5899             gen_lea_modrm(env, s, modrm);
5900             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
5901             gen_op_st_v(s, MO_16, s->T0, s->A0);
5902             gen_add_A0_im(s, 2);
5903             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
5904             if (dflag == MO_16) {
5905                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
5906             }
5907             gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
5908             break;
5909 
5910         case 0xd0: /* xgetbv */
5911             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5912                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5913                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5914                 goto illegal_op;
5915             }
5916             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5917             gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
5918             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
5919             break;
5920 
5921         case 0xd1: /* xsetbv */
5922             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
5923                 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
5924                                  | PREFIX_REPZ | PREFIX_REPNZ))) {
5925                 goto illegal_op;
5926             }
5927             gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
5928             if (!check_cpl0(s)) {
5929                 break;
5930             }
5931             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
5932                                   cpu_regs[R_EDX]);
5933             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
5934             gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
5935             /* End TB because translation flags may change.  */
5936             s->base.is_jmp = DISAS_EOB_NEXT;
5937             break;
5938 
5939         case 0xd8: /* VMRUN */
5940             if (!SVME(s) || !PE(s)) {
5941                 goto illegal_op;
5942             }
5943             if (!check_cpl0(s)) {
5944                 break;
5945             }
5946             gen_update_cc_op(s);
5947             gen_update_eip_cur(s);
5948             gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
5949                              cur_insn_len_i32(s));
5950             tcg_gen_exit_tb(NULL, 0);
5951             s->base.is_jmp = DISAS_NORETURN;
5952             break;
5953 
5954         case 0xd9: /* VMMCALL */
5955             if (!SVME(s)) {
5956                 goto illegal_op;
5957             }
5958             gen_update_cc_op(s);
5959             gen_update_eip_cur(s);
5960             gen_helper_vmmcall(tcg_env);
5961             break;
5962 
5963         case 0xda: /* VMLOAD */
5964             if (!SVME(s) || !PE(s)) {
5965                 goto illegal_op;
5966             }
5967             if (!check_cpl0(s)) {
5968                 break;
5969             }
5970             gen_update_cc_op(s);
5971             gen_update_eip_cur(s);
5972             gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
5973             break;
5974 
5975         case 0xdb: /* VMSAVE */
5976             if (!SVME(s) || !PE(s)) {
5977                 goto illegal_op;
5978             }
5979             if (!check_cpl0(s)) {
5980                 break;
5981             }
5982             gen_update_cc_op(s);
5983             gen_update_eip_cur(s);
5984             gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
5985             break;
5986 
5987         case 0xdc: /* STGI */
5988             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
5989                 || !PE(s)) {
5990                 goto illegal_op;
5991             }
5992             if (!check_cpl0(s)) {
5993                 break;
5994             }
5995             gen_update_cc_op(s);
5996             gen_helper_stgi(tcg_env);
5997             s->base.is_jmp = DISAS_EOB_NEXT;
5998             break;
5999 
6000         case 0xdd: /* CLGI */
6001             if (!SVME(s) || !PE(s)) {
6002                 goto illegal_op;
6003             }
6004             if (!check_cpl0(s)) {
6005                 break;
6006             }
6007             gen_update_cc_op(s);
6008             gen_update_eip_cur(s);
6009             gen_helper_clgi(tcg_env);
6010             break;
6011 
6012         case 0xde: /* SKINIT */
6013             if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
6014                 || !PE(s)) {
6015                 goto illegal_op;
6016             }
6017             gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
6018             /* If not intercepted, not implemented -- raise #UD. */
6019             goto illegal_op;
6020 
6021         case 0xdf: /* INVLPGA */
6022             if (!SVME(s) || !PE(s)) {
6023                 goto illegal_op;
6024             }
6025             if (!check_cpl0(s)) {
6026                 break;
6027             }
6028             gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
6029             if (s->aflag == MO_64) {
6030                 tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
6031             } else {
6032                 tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
6033             }
6034             gen_helper_flush_page(tcg_env, s->A0);
6035             s->base.is_jmp = DISAS_EOB_NEXT;
6036             break;
6037 
6038         CASE_MODRM_MEM_OP(2): /* lgdt */
6039             if (!check_cpl0(s)) {
6040                 break;
6041             }
6042             gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
6043             gen_lea_modrm(env, s, modrm);
6044             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6045             gen_add_A0_im(s, 2);
6046             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6047             if (dflag == MO_16) {
6048                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6049             }
6050             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
6051             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
6052             break;
6053 
6054         CASE_MODRM_MEM_OP(3): /* lidt */
6055             if (!check_cpl0(s)) {
6056                 break;
6057             }
6058             gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
6059             gen_lea_modrm(env, s, modrm);
6060             gen_op_ld_v(s, MO_16, s->T1, s->A0);
6061             gen_add_A0_im(s, 2);
6062             gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
6063             if (dflag == MO_16) {
6064                 tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
6065             }
6066             tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
6067             tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
6068             break;
6069 
6070         CASE_MODRM_OP(4): /* smsw */
6071             if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
6072                 break;
6073             }
6074             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
6075             tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
6076             /*
6077              * In 32-bit mode, the higher 16 bits of the destination
6078              * register are undefined.  In practice CR0[31:0] is stored
6079              * just like in 64-bit mode.
6080              */
6081             mod = (modrm >> 6) & 3;
6082             ot = (mod != 3 ? MO_16 : s->dflag);
6083             gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6084             break;
6085         case 0xee: /* rdpkru */
6086             if (prefixes & PREFIX_LOCK) {
6087                 goto illegal_op;
6088             }
6089             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6090             gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
6091             tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
6092             break;
6093         case 0xef: /* wrpkru */
6094             if (prefixes & PREFIX_LOCK) {
6095                 goto illegal_op;
6096             }
6097             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6098                                   cpu_regs[R_EDX]);
6099             tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
6100             gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
6101             break;
6102 
6103         CASE_MODRM_OP(6): /* lmsw */
6104             if (!check_cpl0(s)) {
6105                 break;
6106             }
6107             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6108             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6109             /*
6110              * Only the 4 lower bits of CR0 are modified.
6111              * PE cannot be set to zero if already set to one.
6112              */
6113             tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
6114             tcg_gen_andi_tl(s->T0, s->T0, 0xf);
6115             tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
6116             tcg_gen_or_tl(s->T0, s->T0, s->T1);
6117             gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
6118             s->base.is_jmp = DISAS_EOB_NEXT;
6119             break;
6120 
6121         CASE_MODRM_MEM_OP(7): /* invlpg */
6122             if (!check_cpl0(s)) {
6123                 break;
6124             }
6125             gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
6126             gen_lea_modrm(env, s, modrm);
6127             gen_helper_flush_page(tcg_env, s->A0);
6128             s->base.is_jmp = DISAS_EOB_NEXT;
6129             break;
6130 
6131         case 0xf8: /* swapgs */
6132 #ifdef TARGET_X86_64
6133             if (CODE64(s)) {
6134                 if (check_cpl0(s)) {
6135                     tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
6136                     tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
6137                                   offsetof(CPUX86State, kernelgsbase));
6138                     tcg_gen_st_tl(s->T0, tcg_env,
6139                                   offsetof(CPUX86State, kernelgsbase));
6140                 }
6141                 break;
6142             }
6143 #endif
6144             goto illegal_op;
6145 
6146         case 0xf9: /* rdtscp */
6147             if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
6148                 goto illegal_op;
6149             }
6150             gen_update_cc_op(s);
6151             gen_update_eip_cur(s);
6152             translator_io_start(&s->base);
6153             gen_helper_rdtsc(tcg_env);
6154             gen_helper_rdpid(s->T0, tcg_env);
6155             gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
6156             break;
6157 
6158         default:
6159             goto unknown_op;
6160         }
6161         break;
6162 
6163     case 0x108: /* invd */
6164     case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
6165         if (check_cpl0(s)) {
6166             gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
6167             /* nothing to do */
6168         }
6169         break;
6170     case 0x63: /* arpl or movslS (x86_64) */
6171 #ifdef TARGET_X86_64
6172         if (CODE64(s)) {
6173             int d_ot;
6174             /* d_ot is the size of destination */
6175             d_ot = dflag;
6176 
6177             modrm = x86_ldub_code(env, s);
6178             reg = ((modrm >> 3) & 7) | REX_R(s);
6179             mod = (modrm >> 6) & 3;
6180             rm = (modrm & 7) | REX_B(s);
6181 
6182             if (mod == 3) {
6183                 gen_op_mov_v_reg(s, MO_32, s->T0, rm);
6184                 /* sign extend */
6185                 if (d_ot == MO_64) {
6186                     tcg_gen_ext32s_tl(s->T0, s->T0);
6187                 }
6188                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6189             } else {
6190                 gen_lea_modrm(env, s, modrm);
6191                 gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
6192                 gen_op_mov_reg_v(s, d_ot, reg, s->T0);
6193             }
6194         } else
6195 #endif
6196         {
6197             TCGLabel *label1;
6198             TCGv t0, t1, t2;
6199 
6200             if (!PE(s) || VM86(s))
6201                 goto illegal_op;
6202             t0 = tcg_temp_new();
6203             t1 = tcg_temp_new();
6204             t2 = tcg_temp_new();
6205             ot = MO_16;
6206             modrm = x86_ldub_code(env, s);
6207             reg = (modrm >> 3) & 7;
6208             mod = (modrm >> 6) & 3;
6209             rm = modrm & 7;
6210             if (mod != 3) {
6211                 gen_lea_modrm(env, s, modrm);
6212                 gen_op_ld_v(s, ot, t0, s->A0);
6213             } else {
6214                 gen_op_mov_v_reg(s, ot, t0, rm);
6215             }
6216             gen_op_mov_v_reg(s, ot, t1, reg);
6217             tcg_gen_andi_tl(s->tmp0, t0, 3);
6218             tcg_gen_andi_tl(t1, t1, 3);
6219             tcg_gen_movi_tl(t2, 0);
6220             label1 = gen_new_label();
6221             tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
6222             tcg_gen_andi_tl(t0, t0, ~3);
6223             tcg_gen_or_tl(t0, t0, t1);
6224             tcg_gen_movi_tl(t2, CC_Z);
6225             gen_set_label(label1);
6226             if (mod != 3) {
6227                 gen_op_st_v(s, ot, t0, s->A0);
6228            } else {
6229                 gen_op_mov_reg_v(s, ot, rm, t0);
6230             }
6231             gen_compute_eflags(s);
6232             tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
6233             tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
6234         }
6235         break;
6236     case 0x102: /* lar */
6237     case 0x103: /* lsl */
6238         {
6239             TCGLabel *label1;
6240             TCGv t0;
6241             if (!PE(s) || VM86(s))
6242                 goto illegal_op;
6243             ot = dflag != MO_16 ? MO_32 : MO_16;
6244             modrm = x86_ldub_code(env, s);
6245             reg = ((modrm >> 3) & 7) | REX_R(s);
6246             gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6247             t0 = tcg_temp_new();
6248             gen_update_cc_op(s);
6249             if (b == 0x102) {
6250                 gen_helper_lar(t0, tcg_env, s->T0);
6251             } else {
6252                 gen_helper_lsl(t0, tcg_env, s->T0);
6253             }
6254             tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
6255             label1 = gen_new_label();
6256             tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
6257             gen_op_mov_reg_v(s, ot, reg, t0);
6258             gen_set_label(label1);
6259             set_cc_op(s, CC_OP_EFLAGS);
6260         }
6261         break;
6262     case 0x118:
6263         modrm = x86_ldub_code(env, s);
6264         mod = (modrm >> 6) & 3;
6265         op = (modrm >> 3) & 7;
6266         switch(op) {
6267         case 0: /* prefetchnta */
6268         case 1: /* prefetchnt0 */
6269         case 2: /* prefetchnt0 */
6270         case 3: /* prefetchnt0 */
6271             if (mod == 3)
6272                 goto illegal_op;
6273             gen_nop_modrm(env, s, modrm);
6274             /* nothing more to do */
6275             break;
6276         default: /* nop (multi byte) */
6277             gen_nop_modrm(env, s, modrm);
6278             break;
6279         }
6280         break;
6281     case 0x11a:
6282         modrm = x86_ldub_code(env, s);
6283         if (s->flags & HF_MPX_EN_MASK) {
6284             mod = (modrm >> 6) & 3;
6285             reg = ((modrm >> 3) & 7) | REX_R(s);
6286             if (prefixes & PREFIX_REPZ) {
6287                 /* bndcl */
6288                 if (reg >= 4
6289                     || (prefixes & PREFIX_LOCK)
6290                     || s->aflag == MO_16) {
6291                     goto illegal_op;
6292                 }
6293                 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
6294             } else if (prefixes & PREFIX_REPNZ) {
6295                 /* bndcu */
6296                 if (reg >= 4
6297                     || (prefixes & PREFIX_LOCK)
6298                     || s->aflag == MO_16) {
6299                     goto illegal_op;
6300                 }
6301                 TCGv_i64 notu = tcg_temp_new_i64();
6302                 tcg_gen_not_i64(notu, cpu_bndu[reg]);
6303                 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
6304             } else if (prefixes & PREFIX_DATA) {
6305                 /* bndmov -- from reg/mem */
6306                 if (reg >= 4 || s->aflag == MO_16) {
6307                     goto illegal_op;
6308                 }
6309                 if (mod == 3) {
6310                     int reg2 = (modrm & 7) | REX_B(s);
6311                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6312                         goto illegal_op;
6313                     }
6314                     if (s->flags & HF_MPX_IU_MASK) {
6315                         tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
6316                         tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
6317                     }
6318                 } else {
6319                     gen_lea_modrm(env, s, modrm);
6320                     if (CODE64(s)) {
6321                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6322                                             s->mem_index, MO_LEUQ);
6323                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6324                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6325                                             s->mem_index, MO_LEUQ);
6326                     } else {
6327                         tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
6328                                             s->mem_index, MO_LEUL);
6329                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6330                         tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
6331                                             s->mem_index, MO_LEUL);
6332                     }
6333                     /* bnd registers are now in-use */
6334                     gen_set_hflag(s, HF_MPX_IU_MASK);
6335                 }
6336             } else if (mod != 3) {
6337                 /* bndldx */
6338                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6339                 if (reg >= 4
6340                     || (prefixes & PREFIX_LOCK)
6341                     || s->aflag == MO_16
6342                     || a.base < -1) {
6343                     goto illegal_op;
6344                 }
6345                 if (a.base >= 0) {
6346                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6347                 } else {
6348                     tcg_gen_movi_tl(s->A0, 0);
6349                 }
6350                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6351                 if (a.index >= 0) {
6352                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6353                 } else {
6354                     tcg_gen_movi_tl(s->T0, 0);
6355                 }
6356                 if (CODE64(s)) {
6357                     gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
6358                     tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
6359                                    offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
6360                 } else {
6361                     gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
6362                     tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
6363                     tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
6364                 }
6365                 gen_set_hflag(s, HF_MPX_IU_MASK);
6366             }
6367         }
6368         gen_nop_modrm(env, s, modrm);
6369         break;
6370     case 0x11b:
6371         modrm = x86_ldub_code(env, s);
6372         if (s->flags & HF_MPX_EN_MASK) {
6373             mod = (modrm >> 6) & 3;
6374             reg = ((modrm >> 3) & 7) | REX_R(s);
6375             if (mod != 3 && (prefixes & PREFIX_REPZ)) {
6376                 /* bndmk */
6377                 if (reg >= 4
6378                     || (prefixes & PREFIX_LOCK)
6379                     || s->aflag == MO_16) {
6380                     goto illegal_op;
6381                 }
6382                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6383                 if (a.base >= 0) {
6384                     tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
6385                     if (!CODE64(s)) {
6386                         tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
6387                     }
6388                 } else if (a.base == -1) {
6389                     /* no base register has lower bound of 0 */
6390                     tcg_gen_movi_i64(cpu_bndl[reg], 0);
6391                 } else {
6392                     /* rip-relative generates #ud */
6393                     goto illegal_op;
6394                 }
6395                 tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
6396                 if (!CODE64(s)) {
6397                     tcg_gen_ext32u_tl(s->A0, s->A0);
6398                 }
6399                 tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
6400                 /* bnd registers are now in-use */
6401                 gen_set_hflag(s, HF_MPX_IU_MASK);
6402                 break;
6403             } else if (prefixes & PREFIX_REPNZ) {
6404                 /* bndcn */
6405                 if (reg >= 4
6406                     || (prefixes & PREFIX_LOCK)
6407                     || s->aflag == MO_16) {
6408                     goto illegal_op;
6409                 }
6410                 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
6411             } else if (prefixes & PREFIX_DATA) {
6412                 /* bndmov -- to reg/mem */
6413                 if (reg >= 4 || s->aflag == MO_16) {
6414                     goto illegal_op;
6415                 }
6416                 if (mod == 3) {
6417                     int reg2 = (modrm & 7) | REX_B(s);
6418                     if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
6419                         goto illegal_op;
6420                     }
6421                     if (s->flags & HF_MPX_IU_MASK) {
6422                         tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
6423                         tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
6424                     }
6425                 } else {
6426                     gen_lea_modrm(env, s, modrm);
6427                     if (CODE64(s)) {
6428                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6429                                             s->mem_index, MO_LEUQ);
6430                         tcg_gen_addi_tl(s->A0, s->A0, 8);
6431                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6432                                             s->mem_index, MO_LEUQ);
6433                     } else {
6434                         tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
6435                                             s->mem_index, MO_LEUL);
6436                         tcg_gen_addi_tl(s->A0, s->A0, 4);
6437                         tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
6438                                             s->mem_index, MO_LEUL);
6439                     }
6440                 }
6441             } else if (mod != 3) {
6442                 /* bndstx */
6443                 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6444                 if (reg >= 4
6445                     || (prefixes & PREFIX_LOCK)
6446                     || s->aflag == MO_16
6447                     || a.base < -1) {
6448                     goto illegal_op;
6449                 }
6450                 if (a.base >= 0) {
6451                     tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
6452                 } else {
6453                     tcg_gen_movi_tl(s->A0, 0);
6454                 }
6455                 gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
6456                 if (a.index >= 0) {
6457                     tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
6458                 } else {
6459                     tcg_gen_movi_tl(s->T0, 0);
6460                 }
6461                 if (CODE64(s)) {
6462                     gen_helper_bndstx64(tcg_env, s->A0, s->T0,
6463                                         cpu_bndl[reg], cpu_bndu[reg]);
6464                 } else {
6465                     gen_helper_bndstx32(tcg_env, s->A0, s->T0,
6466                                         cpu_bndl[reg], cpu_bndu[reg]);
6467                 }
6468             }
6469         }
6470         gen_nop_modrm(env, s, modrm);
6471         break;
6472     case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
6473         modrm = x86_ldub_code(env, s);
6474         gen_nop_modrm(env, s, modrm);
6475         break;
6476 
6477     case 0x120: /* mov reg, crN */
6478     case 0x122: /* mov crN, reg */
6479         if (!check_cpl0(s)) {
6480             break;
6481         }
6482         modrm = x86_ldub_code(env, s);
6483         /*
6484          * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6485          * AMD documentation (24594.pdf) and testing of Intel 386 and 486
6486          * processors all show that the mod bits are assumed to be 1's,
6487          * regardless of actual values.
6488          */
6489         rm = (modrm & 7) | REX_B(s);
6490         reg = ((modrm >> 3) & 7) | REX_R(s);
6491         switch (reg) {
6492         case 0:
6493             if ((prefixes & PREFIX_LOCK) &&
6494                 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
6495                 reg = 8;
6496             }
6497             break;
6498         case 2:
6499         case 3:
6500         case 4:
6501         case 8:
6502             break;
6503         default:
6504             goto unknown_op;
6505         }
6506         ot  = (CODE64(s) ? MO_64 : MO_32);
6507 
6508         translator_io_start(&s->base);
6509         if (b & 2) {
6510             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
6511             gen_op_mov_v_reg(s, ot, s->T0, rm);
6512             gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
6513             s->base.is_jmp = DISAS_EOB_NEXT;
6514         } else {
6515             gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
6516             gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
6517             gen_op_mov_reg_v(s, ot, rm, s->T0);
6518         }
6519         break;
6520 
6521     case 0x121: /* mov reg, drN */
6522     case 0x123: /* mov drN, reg */
6523         if (check_cpl0(s)) {
6524             modrm = x86_ldub_code(env, s);
6525             /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
6526              * AMD documentation (24594.pdf) and testing of
6527              * intel 386 and 486 processors all show that the mod bits
6528              * are assumed to be 1's, regardless of actual values.
6529              */
6530             rm = (modrm & 7) | REX_B(s);
6531             reg = ((modrm >> 3) & 7) | REX_R(s);
6532             if (CODE64(s))
6533                 ot = MO_64;
6534             else
6535                 ot = MO_32;
6536             if (reg >= 8) {
6537                 goto illegal_op;
6538             }
6539             if (b & 2) {
6540                 gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
6541                 gen_op_mov_v_reg(s, ot, s->T0, rm);
6542                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6543                 gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
6544                 s->base.is_jmp = DISAS_EOB_NEXT;
6545             } else {
6546                 gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
6547                 tcg_gen_movi_i32(s->tmp2_i32, reg);
6548                 gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
6549                 gen_op_mov_reg_v(s, ot, rm, s->T0);
6550             }
6551         }
6552         break;
6553     case 0x106: /* clts */
6554         if (check_cpl0(s)) {
6555             gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
6556             gen_helper_clts(tcg_env);
6557             /* abort block because static cpu state changed */
6558             s->base.is_jmp = DISAS_EOB_NEXT;
6559         }
6560         break;
6561     /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
6562     case 0x1c3: /* MOVNTI reg, mem */
6563         if (!(s->cpuid_features & CPUID_SSE2))
6564             goto illegal_op;
6565         ot = mo_64_32(dflag);
6566         modrm = x86_ldub_code(env, s);
6567         mod = (modrm >> 6) & 3;
6568         if (mod == 3)
6569             goto illegal_op;
6570         reg = ((modrm >> 3) & 7) | REX_R(s);
6571         /* generate a generic store */
6572         gen_ldst_modrm(env, s, modrm, ot, reg, 1);
6573         break;
6574     case 0x1ae:
6575         modrm = x86_ldub_code(env, s);
6576         switch (modrm) {
6577         CASE_MODRM_MEM_OP(0): /* fxsave */
6578             if (!(s->cpuid_features & CPUID_FXSR)
6579                 || (prefixes & PREFIX_LOCK)) {
6580                 goto illegal_op;
6581             }
6582             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6583                 gen_exception(s, EXCP07_PREX);
6584                 break;
6585             }
6586             gen_lea_modrm(env, s, modrm);
6587             gen_helper_fxsave(tcg_env, s->A0);
6588             break;
6589 
6590         CASE_MODRM_MEM_OP(1): /* fxrstor */
6591             if (!(s->cpuid_features & CPUID_FXSR)
6592                 || (prefixes & PREFIX_LOCK)) {
6593                 goto illegal_op;
6594             }
6595             if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
6596                 gen_exception(s, EXCP07_PREX);
6597                 break;
6598             }
6599             gen_lea_modrm(env, s, modrm);
6600             gen_helper_fxrstor(tcg_env, s->A0);
6601             break;
6602 
6603         CASE_MODRM_MEM_OP(2): /* ldmxcsr */
6604             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6605                 goto illegal_op;
6606             }
6607             if (s->flags & HF_TS_MASK) {
6608                 gen_exception(s, EXCP07_PREX);
6609                 break;
6610             }
6611             gen_lea_modrm(env, s, modrm);
6612             tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
6613             gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
6614             break;
6615 
6616         CASE_MODRM_MEM_OP(3): /* stmxcsr */
6617             if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
6618                 goto illegal_op;
6619             }
6620             if (s->flags & HF_TS_MASK) {
6621                 gen_exception(s, EXCP07_PREX);
6622                 break;
6623             }
6624             gen_helper_update_mxcsr(tcg_env);
6625             gen_lea_modrm(env, s, modrm);
6626             tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
6627             gen_op_st_v(s, MO_32, s->T0, s->A0);
6628             break;
6629 
6630         CASE_MODRM_MEM_OP(4): /* xsave */
6631             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6632                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6633                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6634                 goto illegal_op;
6635             }
6636             gen_lea_modrm(env, s, modrm);
6637             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6638                                   cpu_regs[R_EDX]);
6639             gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
6640             break;
6641 
6642         CASE_MODRM_MEM_OP(5): /* xrstor */
6643             if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6644                 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
6645                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
6646                 goto illegal_op;
6647             }
6648             gen_lea_modrm(env, s, modrm);
6649             tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6650                                   cpu_regs[R_EDX]);
6651             gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
6652             /* XRSTOR is how MPX is enabled, which changes how
6653                we translate.  Thus we need to end the TB.  */
6654             s->base.is_jmp = DISAS_EOB_NEXT;
6655             break;
6656 
6657         CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
6658             if (prefixes & PREFIX_LOCK) {
6659                 goto illegal_op;
6660             }
6661             if (prefixes & PREFIX_DATA) {
6662                 /* clwb */
6663                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
6664                     goto illegal_op;
6665                 }
6666                 gen_nop_modrm(env, s, modrm);
6667             } else {
6668                 /* xsaveopt */
6669                 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
6670                     || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
6671                     || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
6672                     goto illegal_op;
6673                 }
6674                 gen_lea_modrm(env, s, modrm);
6675                 tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
6676                                       cpu_regs[R_EDX]);
6677                 gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
6678             }
6679             break;
6680 
6681         CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
6682             if (prefixes & PREFIX_LOCK) {
6683                 goto illegal_op;
6684             }
6685             if (prefixes & PREFIX_DATA) {
6686                 /* clflushopt */
6687                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
6688                     goto illegal_op;
6689                 }
6690             } else {
6691                 /* clflush */
6692                 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
6693                     || !(s->cpuid_features & CPUID_CLFLUSH)) {
6694                     goto illegal_op;
6695                 }
6696             }
6697             gen_nop_modrm(env, s, modrm);
6698             break;
6699 
6700         case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
6701         case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
6702         case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
6703         case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
6704             if (CODE64(s)
6705                 && (prefixes & PREFIX_REPZ)
6706                 && !(prefixes & PREFIX_LOCK)
6707                 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
6708                 TCGv base, treg, src, dst;
6709 
6710                 /* Preserve hflags bits by testing CR4 at runtime.  */
6711                 tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
6712                 gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
6713 
6714                 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
6715                 treg = cpu_regs[(modrm & 7) | REX_B(s)];
6716 
6717                 if (modrm & 0x10) {
6718                     /* wr*base */
6719                     dst = base, src = treg;
6720                 } else {
6721                     /* rd*base */
6722                     dst = treg, src = base;
6723                 }
6724 
6725                 if (s->dflag == MO_32) {
6726                     tcg_gen_ext32u_tl(dst, src);
6727                 } else {
6728                     tcg_gen_mov_tl(dst, src);
6729                 }
6730                 break;
6731             }
6732             goto unknown_op;
6733 
6734         case 0xf8: /* sfence / pcommit */
6735             if (prefixes & PREFIX_DATA) {
6736                 /* pcommit */
6737                 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
6738                     || (prefixes & PREFIX_LOCK)) {
6739                     goto illegal_op;
6740                 }
6741                 break;
6742             }
6743             /* fallthru */
6744         case 0xf9 ... 0xff: /* sfence */
6745             if (!(s->cpuid_features & CPUID_SSE)
6746                 || (prefixes & PREFIX_LOCK)) {
6747                 goto illegal_op;
6748             }
6749             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
6750             break;
6751         case 0xe8 ... 0xef: /* lfence */
6752             if (!(s->cpuid_features & CPUID_SSE)
6753                 || (prefixes & PREFIX_LOCK)) {
6754                 goto illegal_op;
6755             }
6756             tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
6757             break;
6758         case 0xf0 ... 0xf7: /* mfence */
6759             if (!(s->cpuid_features & CPUID_SSE2)
6760                 || (prefixes & PREFIX_LOCK)) {
6761                 goto illegal_op;
6762             }
6763             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
6764             break;
6765 
6766         default:
6767             goto unknown_op;
6768         }
6769         break;
6770 
6771     case 0x10d: /* 3DNow! prefetch(w) */
6772         modrm = x86_ldub_code(env, s);
6773         mod = (modrm >> 6) & 3;
6774         if (mod == 3)
6775             goto illegal_op;
6776         gen_nop_modrm(env, s, modrm);
6777         break;
6778     case 0x1aa: /* rsm */
6779         gen_svm_check_intercept(s, SVM_EXIT_RSM);
6780         if (!(s->flags & HF_SMM_MASK))
6781             goto illegal_op;
6782 #ifdef CONFIG_USER_ONLY
6783         /* we should not be in SMM mode */
6784         g_assert_not_reached();
6785 #else
6786         gen_update_cc_op(s);
6787         gen_update_eip_next(s);
6788         gen_helper_rsm(tcg_env);
6789 #endif /* CONFIG_USER_ONLY */
6790         s->base.is_jmp = DISAS_EOB_ONLY;
6791         break;
6792     case 0x1b8: /* SSE4.2 popcnt */
6793         if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
6794              PREFIX_REPZ)
6795             goto illegal_op;
6796         if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
6797             goto illegal_op;
6798 
6799         modrm = x86_ldub_code(env, s);
6800         reg = ((modrm >> 3) & 7) | REX_R(s);
6801 
6802         if (s->prefix & PREFIX_DATA) {
6803             ot = MO_16;
6804         } else {
6805             ot = mo_64_32(dflag);
6806         }
6807 
6808         gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6809         gen_extu(ot, s->T0);
6810         tcg_gen_mov_tl(cpu_cc_src, s->T0);
6811         tcg_gen_ctpop_tl(s->T0, s->T0);
6812         gen_op_mov_reg_v(s, ot, reg, s->T0);
6813 
6814         set_cc_op(s, CC_OP_POPCNT);
6815         break;
6816     case 0x10e ... 0x117:
6817     case 0x128 ... 0x12f:
6818     case 0x138 ... 0x13a:
6819     case 0x150 ... 0x179:
6820     case 0x17c ... 0x17f:
6821     case 0x1c2:
6822     case 0x1c4 ... 0x1c6:
6823     case 0x1d0 ... 0x1fe:
6824         disas_insn_new(s, cpu, b);
6825         break;
6826     default:
6827         goto unknown_op;
6828     }
6829     return true;
6830  illegal_op:
6831     gen_illegal_opcode(s);
6832     return true;
6833  unknown_op:
6834     gen_unknown_opcode(env, s);
6835     return true;
6836 }
6837 
6838 void tcg_x86_init(void)
6839 {
6840     static const char reg_names[CPU_NB_REGS][4] = {
6841 #ifdef TARGET_X86_64
6842         [R_EAX] = "rax",
6843         [R_EBX] = "rbx",
6844         [R_ECX] = "rcx",
6845         [R_EDX] = "rdx",
6846         [R_ESI] = "rsi",
6847         [R_EDI] = "rdi",
6848         [R_EBP] = "rbp",
6849         [R_ESP] = "rsp",
6850         [8]  = "r8",
6851         [9]  = "r9",
6852         [10] = "r10",
6853         [11] = "r11",
6854         [12] = "r12",
6855         [13] = "r13",
6856         [14] = "r14",
6857         [15] = "r15",
6858 #else
6859         [R_EAX] = "eax",
6860         [R_EBX] = "ebx",
6861         [R_ECX] = "ecx",
6862         [R_EDX] = "edx",
6863         [R_ESI] = "esi",
6864         [R_EDI] = "edi",
6865         [R_EBP] = "ebp",
6866         [R_ESP] = "esp",
6867 #endif
6868     };
6869     static const char eip_name[] = {
6870 #ifdef TARGET_X86_64
6871         "rip"
6872 #else
6873         "eip"
6874 #endif
6875     };
6876     static const char seg_base_names[6][8] = {
6877         [R_CS] = "cs_base",
6878         [R_DS] = "ds_base",
6879         [R_ES] = "es_base",
6880         [R_FS] = "fs_base",
6881         [R_GS] = "gs_base",
6882         [R_SS] = "ss_base",
6883     };
6884     static const char bnd_regl_names[4][8] = {
6885         "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
6886     };
6887     static const char bnd_regu_names[4][8] = {
6888         "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
6889     };
6890     int i;
6891 
6892     cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
6893                                        offsetof(CPUX86State, cc_op), "cc_op");
6894     cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
6895                                     "cc_dst");
6896     cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
6897                                     "cc_src");
6898     cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
6899                                      "cc_src2");
6900     cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
6901 
6902     for (i = 0; i < CPU_NB_REGS; ++i) {
6903         cpu_regs[i] = tcg_global_mem_new(tcg_env,
6904                                          offsetof(CPUX86State, regs[i]),
6905                                          reg_names[i]);
6906     }
6907 
6908     for (i = 0; i < 6; ++i) {
6909         cpu_seg_base[i]
6910             = tcg_global_mem_new(tcg_env,
6911                                  offsetof(CPUX86State, segs[i].base),
6912                                  seg_base_names[i]);
6913     }
6914 
6915     for (i = 0; i < 4; ++i) {
6916         cpu_bndl[i]
6917             = tcg_global_mem_new_i64(tcg_env,
6918                                      offsetof(CPUX86State, bnd_regs[i].lb),
6919                                      bnd_regl_names[i]);
6920         cpu_bndu[i]
6921             = tcg_global_mem_new_i64(tcg_env,
6922                                      offsetof(CPUX86State, bnd_regs[i].ub),
6923                                      bnd_regu_names[i]);
6924     }
6925 }
6926 
6927 static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6928 {
6929     DisasContext *dc = container_of(dcbase, DisasContext, base);
6930     CPUX86State *env = cpu_env(cpu);
6931     uint32_t flags = dc->base.tb->flags;
6932     uint32_t cflags = tb_cflags(dc->base.tb);
6933     int cpl = (flags >> HF_CPL_SHIFT) & 3;
6934     int iopl = (flags >> IOPL_SHIFT) & 3;
6935 
6936     dc->cs_base = dc->base.tb->cs_base;
6937     dc->pc_save = dc->base.pc_next;
6938     dc->flags = flags;
6939 #ifndef CONFIG_USER_ONLY
6940     dc->cpl = cpl;
6941     dc->iopl = iopl;
6942 #endif
6943 
6944     /* We make some simplifying assumptions; validate they're correct. */
6945     g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
6946     g_assert(CPL(dc) == cpl);
6947     g_assert(IOPL(dc) == iopl);
6948     g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
6949     g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
6950     g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
6951     g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
6952     g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
6953     g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
6954     g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
6955     g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));
6956 
6957     dc->cc_op = CC_OP_DYNAMIC;
6958     dc->cc_op_dirty = false;
6959     dc->popl_esp_hack = 0;
6960     /* select memory access functions */
6961     dc->mem_index = cpu_mmu_index(cpu, false);
6962     dc->cpuid_features = env->features[FEAT_1_EDX];
6963     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
6964     dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
6965     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
6966     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
6967     dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
6968     dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
6969     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
6970     dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
6971                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
6972     /*
6973      * If jmp_opt, we want to handle each string instruction individually.
6974      * For icount also disable repz optimization so that each iteration
6975      * is accounted separately.
6976      */
6977     dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
6978 
6979     dc->T0 = tcg_temp_new();
6980     dc->T1 = tcg_temp_new();
6981     dc->A0 = tcg_temp_new();
6982 
6983     dc->tmp0 = tcg_temp_new();
6984     dc->tmp1_i64 = tcg_temp_new_i64();
6985     dc->tmp2_i32 = tcg_temp_new_i32();
6986     dc->tmp3_i32 = tcg_temp_new_i32();
6987     dc->tmp4 = tcg_temp_new();
6988     dc->cc_srcT = tcg_temp_new();
6989 }
6990 
6991 static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
6992 {
6993 }
6994 
6995 static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6996 {
6997     DisasContext *dc = container_of(dcbase, DisasContext, base);
6998     target_ulong pc_arg = dc->base.pc_next;
6999 
7000     dc->prev_insn_start = dc->base.insn_start;
7001     dc->prev_insn_end = tcg_last_op();
7002     if (tb_cflags(dcbase->tb) & CF_PCREL) {
7003         pc_arg &= ~TARGET_PAGE_MASK;
7004     }
7005     tcg_gen_insn_start(pc_arg, dc->cc_op);
7006 }
7007 
7008 static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
7009 {
7010     DisasContext *dc = container_of(dcbase, DisasContext, base);
7011 
7012 #ifdef TARGET_VSYSCALL_PAGE
7013     /*
7014      * Detect entry into the vsyscall page and invoke the syscall.
7015      */
7016     if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
7017         gen_exception(dc, EXCP_VSYSCALL);
7018         dc->base.pc_next = dc->pc + 1;
7019         return;
7020     }
7021 #endif
7022 
7023     if (disas_insn(dc, cpu)) {
7024         target_ulong pc_next = dc->pc;
7025         dc->base.pc_next = pc_next;
7026 
7027         if (dc->base.is_jmp == DISAS_NEXT) {
7028             if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
7029                 /*
7030                  * If single step mode, we generate only one instruction and
7031                  * generate an exception.
7032                  * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7033                  * the flag and abort the translation to give the irqs a
7034                  * chance to happen.
7035                  */
7036                 dc->base.is_jmp = DISAS_EOB_NEXT;
7037             } else if (!is_same_page(&dc->base, pc_next)) {
7038                 dc->base.is_jmp = DISAS_TOO_MANY;
7039             }
7040         }
7041     }
7042 }
7043 
7044 static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
7045 {
7046     DisasContext *dc = container_of(dcbase, DisasContext, base);
7047 
7048     switch (dc->base.is_jmp) {
7049     case DISAS_NORETURN:
7050         break;
7051     case DISAS_TOO_MANY:
7052         gen_update_cc_op(dc);
7053         gen_jmp_rel_csize(dc, 0, 0);
7054         break;
7055     case DISAS_EOB_NEXT:
7056         gen_update_cc_op(dc);
7057         gen_update_eip_cur(dc);
7058         /* fall through */
7059     case DISAS_EOB_ONLY:
7060         gen_eob(dc);
7061         break;
7062     case DISAS_EOB_INHIBIT_IRQ:
7063         gen_update_cc_op(dc);
7064         gen_update_eip_cur(dc);
7065         gen_eob_inhibit_irq(dc, true);
7066         break;
7067     case DISAS_JUMP:
7068         gen_jr(dc);
7069         break;
7070     default:
7071         g_assert_not_reached();
7072     }
7073 }
7074 
7075 static void i386_tr_disas_log(const DisasContextBase *dcbase,
7076                               CPUState *cpu, FILE *logfile)
7077 {
7078     DisasContext *dc = container_of(dcbase, DisasContext, base);
7079 
7080     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
7081     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
7082 }
7083 
7084 static const TranslatorOps i386_tr_ops = {
7085     .init_disas_context = i386_tr_init_disas_context,
7086     .tb_start           = i386_tr_tb_start,
7087     .insn_start         = i386_tr_insn_start,
7088     .translate_insn     = i386_tr_translate_insn,
7089     .tb_stop            = i386_tr_tb_stop,
7090     .disas_log          = i386_tr_disas_log,
7091 };
7092 
7093 /* generate intermediate code for basic block 'tb'.  */
7094 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
7095                            vaddr pc, void *host_pc)
7096 {
7097     DisasContext dc;
7098 
7099     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
7100 }
7101