xref: /qemu/target/i386/tcg/seg_helper.c (revision f917eed3)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 
30 //#define DEBUG_PCALL
31 
32 #ifdef DEBUG_PCALL
33 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
34 # define LOG_PCALL_STATE(cpu)                                  \
35     log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
36 #else
37 # define LOG_PCALL(...) do { } while (0)
38 # define LOG_PCALL_STATE(cpu) do { } while (0)
39 #endif
40 
41 /*
42  * TODO: Convert callers to compute cpu_mmu_index_kernel once
43  * and use *_mmuidx_ra directly.
44  */
45 #define cpu_ldub_kernel_ra(e, p, r) \
46     cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
47 #define cpu_lduw_kernel_ra(e, p, r) \
48     cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
49 #define cpu_ldl_kernel_ra(e, p, r) \
50     cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
51 #define cpu_ldq_kernel_ra(e, p, r) \
52     cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
53 
54 #define cpu_stb_kernel_ra(e, p, v, r) \
55     cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
56 #define cpu_stw_kernel_ra(e, p, v, r) \
57     cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
58 #define cpu_stl_kernel_ra(e, p, v, r) \
59     cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
60 #define cpu_stq_kernel_ra(e, p, v, r) \
61     cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
62 
63 #define cpu_ldub_kernel(e, p)    cpu_ldub_kernel_ra(e, p, 0)
64 #define cpu_lduw_kernel(e, p)    cpu_lduw_kernel_ra(e, p, 0)
65 #define cpu_ldl_kernel(e, p)     cpu_ldl_kernel_ra(e, p, 0)
66 #define cpu_ldq_kernel(e, p)     cpu_ldq_kernel_ra(e, p, 0)
67 
68 #define cpu_stb_kernel(e, p, v)  cpu_stb_kernel_ra(e, p, v, 0)
69 #define cpu_stw_kernel(e, p, v)  cpu_stw_kernel_ra(e, p, v, 0)
70 #define cpu_stl_kernel(e, p, v)  cpu_stl_kernel_ra(e, p, v, 0)
71 #define cpu_stq_kernel(e, p, v)  cpu_stq_kernel_ra(e, p, v, 0)
72 
73 /* return non-zero on error */
74 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
75                                uint32_t *e2_ptr, int selector,
76                                uintptr_t retaddr)
77 {
78     SegmentCache *dt;
79     int index;
80     target_ulong ptr;
81 
82     if (selector & 0x4) {
83         dt = &env->ldt;
84     } else {
85         dt = &env->gdt;
86     }
87     index = selector & ~7;
88     if ((index + 7) > dt->limit) {
89         return -1;
90     }
91     ptr = dt->base + index;
92     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
93     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
94     return 0;
95 }
96 
97 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
98                                uint32_t *e2_ptr, int selector)
99 {
100     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
101 }
102 
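/* The 20-bit limit lives in e1[15:0] and e2[19:16]; when the granularity
   bit is set it is scaled to 4K-byte units (low 12 bits forced to 1). */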
103 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
104 {
105     unsigned int limit;
106 
107     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
108     if (e2 & DESC_G_MASK) {
109         limit = (limit << 12) | 0xfff;
110     }
111     return limit;
112 }
113 
114 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
115 {
116     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
117 }
118 
119 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
120                                          uint32_t e2)
121 {
122     sc->base = get_seg_base(e1, e2);
123     sc->limit = get_seg_limit(e1, e2);
124     sc->flags = e2;
125 }
126 
127 /* init the segment cache in vm86 mode. */
128 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
129 {
130     selector &= 0xffff;
131 
132     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
133                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
134                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
135 }
136 
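/* Read the privilege-level 'dpl' stack pointer and stack segment selector
   from the current TSS, handling both 16-bit and 32-bit TSS layouts. */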
137 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
138                                        uint32_t *esp_ptr, int dpl,
139                                        uintptr_t retaddr)
140 {
141     X86CPU *cpu = env_archcpu(env);
142     int type, index, shift;
143 
144 #if 0
145     {
146         int i;
147         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
148         for (i = 0; i < env->tr.limit; i++) {
149             printf("%02x ", env->tr.base[i]);
150             if ((i & 7) == 7) {
151                 printf("\n");
152             }
153         }
154         printf("\n");
155     }
156 #endif
157 
158     if (!(env->tr.flags & DESC_P_MASK)) {
159         cpu_abort(CPU(cpu), "invalid tss");
160     }
161     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
162     if ((type & 7) != 1) {
163         cpu_abort(CPU(cpu), "invalid tss type");
164     }
165     shift = type >> 3;
166     index = (dpl * 4 + 2) << shift;
167     if (index + (4 << shift) - 1 > env->tr.limit) {
168         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
169     }
170     if (shift == 0) {
171         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
172         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
173     } else {
174         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
175         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
176     }
177 }
178 
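/* Load one segment register as part of a task switch: invalid descriptors
   raise #TS and a non-present segment raises #NP, with the offending
   selector as error code. */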
179 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
180                          uintptr_t retaddr)
181 {
182     uint32_t e1, e2;
183     int rpl, dpl;
184 
185     if ((selector & 0xfffc) != 0) {
186         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
187             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188         }
189         if (!(e2 & DESC_S_MASK)) {
190             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
191         }
192         rpl = selector & 3;
193         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
194         if (seg_reg == R_CS) {
195             if (!(e2 & DESC_CS_MASK)) {
196                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197             }
198             if (dpl != rpl) {
199                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
200             }
201         } else if (seg_reg == R_SS) {
202             /* SS must be writable data */
203             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
204                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205             }
206             if (dpl != cpl || dpl != rpl) {
207                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
208             }
209         } else {
210             /* execute-only (non-readable) code is not allowed here */
211             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
212                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
213             }
214             /* if data or non-conforming code, check the privilege rights */
215             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
216                 if (dpl < cpl || dpl < rpl) {
217                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
218                 }
219             }
220         }
221         if (!(e2 & DESC_P_MASK)) {
222             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
223         }
224         cpu_x86_load_seg_cache(env, seg_reg, selector,
225                                get_seg_base(e1, e2),
226                                get_seg_limit(e1, e2),
227                                e2);
228     } else {
229         if (seg_reg == R_SS || seg_reg == R_CS) {
230             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
231         }
232     }
233 }
234 
235 #define SWITCH_TSS_JMP  0
236 #define SWITCH_TSS_IRET 1
237 #define SWITCH_TSS_CALL 2
238 
239 /* XXX: restore CPU state in registers (PowerPC case) */
240 static void switch_tss_ra(CPUX86State *env, int tss_selector,
241                           uint32_t e1, uint32_t e2, int source,
242                           uint32_t next_eip, uintptr_t retaddr)
243 {
244     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
245     target_ulong tss_base;
246     uint32_t new_regs[8], new_segs[6];
247     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
248     uint32_t old_eflags, eflags_mask;
249     SegmentCache *dt;
250     int index;
251     target_ulong ptr;
252 
253     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
254     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
255               source);
256 
257     /* if task gate, read the TSS segment it points to and load it */
258     if (type == 5) {
259         if (!(e2 & DESC_P_MASK)) {
260             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
261         }
262         tss_selector = e1 >> 16;
263         if (tss_selector & 4) {
264             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
265         }
266         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
267             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268         }
269         if (e2 & DESC_S_MASK) {
270             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
271         }
272         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
273         if ((type & 7) != 1) {
274             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
275         }
276     }
277 
278     if (!(e2 & DESC_P_MASK)) {
279         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
280     }
281 
282     if (type & 8) {
283         tss_limit_max = 103;
284     } else {
285         tss_limit_max = 43;
286     }
287     tss_limit = get_seg_limit(e1, e2);
288     tss_base = get_seg_base(e1, e2);
289     if ((tss_selector & 4) != 0 ||
290         tss_limit < tss_limit_max) {
291         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
292     }
293     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
294     if (old_type & 8) {
295         old_tss_limit_max = 103;
296     } else {
297         old_tss_limit_max = 43;
298     }
299 
300     /* read all the registers from the new TSS */
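    /* The offsets below follow the architectural TSS layouts: 32-bit TSS
       (CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, GPRs from 0x28, segment
       selectors from 0x48) and 16-bit TSS (IP at 0x0e, FLAGS at 0x10). */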
301     if (type & 8) {
302         /* 32 bit */
303         new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
304         new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
305         new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
306         for (i = 0; i < 8; i++) {
307             new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
308                                             retaddr);
309         }
310         for (i = 0; i < 6; i++) {
311             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
312                                              retaddr);
313         }
314         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
315         new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
316     } else {
317         /* 16 bit */
318         new_cr3 = 0;
319         new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
320         new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
321         for (i = 0; i < 8; i++) {
322             new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
323                                              retaddr) | 0xffff0000;
324         }
325         for (i = 0; i < 4; i++) {
326             new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
327                                              retaddr);
328         }
329         new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
330         new_segs[R_FS] = 0;
331         new_segs[R_GS] = 0;
332         new_trap = 0;
333     }
334     /* XXX: avoid a compiler warning, see
335      http://support.amd.com/us/Processor_TechDocs/24593.pdf
336      chapters 12.2.5 and 13.2.4 on how to implement the TSS Trap bit */
337     (void)new_trap;
338 
339     /* NOTE: we must avoid memory exceptions during the task switch,
340        so we make dummy accesses beforehand */
341     /* XXX: it can still fail in some cases, so a bigger hack is
342        necessary to validate the TLB after having done the accesses */
343 
344     v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
345     v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
346     cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
347     cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
348 
349     /* clear busy bit (it is restartable) */
350     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
351         target_ulong ptr;
352         uint32_t e2;
353 
354         ptr = env->gdt.base + (env->tr.selector & ~7);
355         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
356         e2 &= ~DESC_TSS_BUSY_MASK;
357         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
358     }
359     old_eflags = cpu_compute_eflags(env);
360     if (source == SWITCH_TSS_IRET) {
361         old_eflags &= ~NT_MASK;
362     }
363 
364     /* save the current state in the old TSS */
365     if (type & 8) {
366         /* 32 bit */
367         cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
368         cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
369         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
370         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
371         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
372         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
373         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
374         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
375         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
376         cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
377         for (i = 0; i < 6; i++) {
378             cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
379                               env->segs[i].selector, retaddr);
380         }
381     } else {
382         /* 16 bit */
383         cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
384         cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
385         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
386         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
387         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
388         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
389         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
390         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
391         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
392         cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
393         for (i = 0; i < 4; i++) {
394             cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
395                               env->segs[i].selector, retaddr);
396         }
397     }
398 
399     /* now if an exception occurs, it will occur in the next task
400        context */
401 
402     if (source == SWITCH_TSS_CALL) {
403         cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
404         new_eflags |= NT_MASK;
405     }
406 
407     /* set busy bit */
408     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
409         target_ulong ptr;
410         uint32_t e2;
411 
412         ptr = env->gdt.base + (tss_selector & ~7);
413         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
414         e2 |= DESC_TSS_BUSY_MASK;
415         cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
416     }
417 
418     /* set the new CPU state */
419     /* from this point on, any exception that occurs can cause problems */
420     env->cr[0] |= CR0_TS_MASK;
421     env->hflags |= HF_TS_MASK;
422     env->tr.selector = tss_selector;
423     env->tr.base = tss_base;
424     env->tr.limit = tss_limit;
425     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426 
427     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
428         cpu_x86_update_cr3(env, new_cr3);
429     }
430 
431     /* first load all registers without raising exceptions, then reload
432        them, which may raise exceptions */
433     env->eip = new_eip;
434     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
435         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
436     if (!(type & 8)) {
437         eflags_mask &= 0xffff;
438     }
439     cpu_load_eflags(env, new_eflags, eflags_mask);
440     /* XXX: what to do in 16 bit case? */
441     env->regs[R_EAX] = new_regs[0];
442     env->regs[R_ECX] = new_regs[1];
443     env->regs[R_EDX] = new_regs[2];
444     env->regs[R_EBX] = new_regs[3];
445     env->regs[R_ESP] = new_regs[4];
446     env->regs[R_EBP] = new_regs[5];
447     env->regs[R_ESI] = new_regs[6];
448     env->regs[R_EDI] = new_regs[7];
449     if (new_eflags & VM_MASK) {
450         for (i = 0; i < 6; i++) {
451             load_seg_vm(env, i, new_segs[i]);
452         }
453     } else {
454         /* load just the selectors first, as the rest may trigger exceptions */
455         for (i = 0; i < 6; i++) {
456             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
457         }
458     }
459 
460     env->ldt.selector = new_ldt & ~4;
461     env->ldt.base = 0;
462     env->ldt.limit = 0;
463     env->ldt.flags = 0;
464 
465     /* load the LDT */
466     if (new_ldt & 4) {
467         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
468     }
469 
470     if ((new_ldt & 0xfffc) != 0) {
471         dt = &env->gdt;
472         index = new_ldt & ~7;
473         if ((index + 7) > dt->limit) {
474             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
475         }
476         ptr = dt->base + index;
477         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
478         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
479         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
480             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481         }
482         if (!(e2 & DESC_P_MASK)) {
483             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
484         }
485         load_seg_cache_raw_dt(&env->ldt, e1, e2);
486     }
487 
488     /* load the segments */
489     if (!(new_eflags & VM_MASK)) {
490         int cpl = new_segs[R_CS] & 3;
491         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
492         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
493         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
494         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
495         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
496         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
497     }
498 
499     /* check that env->eip is in the CS segment limits */
500     if (new_eip > env->segs[R_CS].limit) {
501         /* XXX: different exception if CALL? */
502         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
503     }
504 
505 #ifndef CONFIG_USER_ONLY
506     /* reset local breakpoints */
507     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
508         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
509     }
510 #endif
511 }
512 
513 static void switch_tss(CPUX86State *env, int tss_selector,
514                        uint32_t e1, uint32_t e2, int source,
515                         uint32_t next_eip)
516 {
517     switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
518 }
519 
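/* Stack-pointer mask derived from the SS descriptor flags: 0xffff for
   16-bit stacks, 0xffffffff for 32-bit stacks (B bit set), and 0 for
   64-bit stacks (no masking). */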
520 static inline unsigned int get_sp_mask(unsigned int e2)
521 {
522 #ifdef TARGET_X86_64
523     if (e2 & DESC_L_MASK) {
524         return 0;
525     } else
526 #endif
527     if (e2 & DESC_B_MASK) {
528         return 0xffffffff;
529     } else {
530         return 0xffff;
531     }
532 }
533 
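/* Exceptions that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17). */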
534 static int exception_has_error_code(int intno)
535 {
536     switch (intno) {
537     case 8:
538     case 10:
539     case 11:
540     case 12:
541     case 13:
542     case 14:
543     case 17:
544         return 1;
545     }
546     return 0;
547 }
548 
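/* SET_ESP writes back only the part of ESP/RSP selected by the stack-size
   mask: 16-bit stacks preserve the upper bits, 32-bit writes zero-extend
   (as on hardware), and a zero mask means a full 64-bit RSP update. */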
549 #ifdef TARGET_X86_64
550 #define SET_ESP(val, sp_mask)                                   \
551     do {                                                        \
552         if ((sp_mask) == 0xffff) {                              \
553             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
554                 ((val) & 0xffff);                               \
555         } else if ((sp_mask) == 0xffffffffLL) {                 \
556             env->regs[R_ESP] = (uint32_t)(val);                 \
557         } else {                                                \
558             env->regs[R_ESP] = (val);                           \
559         }                                                       \
560     } while (0)
561 #else
562 #define SET_ESP(val, sp_mask)                                   \
563     do {                                                        \
564         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
565             ((val) & (sp_mask));                                \
566     } while (0)
567 #endif
568 
569 /* On 64-bit machines this addition can overflow, so this segment-addition
570  * macro can be used to trim the value to 32 bits whenever needed. */
571 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
572 
573 /* XXX: add an is_user flag to have proper security support */
574 #define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
575     {                                                            \
576         sp -= 2;                                                 \
577         cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
578     }
579 
580 #define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
581     {                                                                   \
582         sp -= 4;                                                        \
583         cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
584     }
585 
586 #define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
587     {                                                            \
588         val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
589         sp += 2;                                                 \
590     }
591 
592 #define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
593     {                                                                   \
594         val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
595         sp += 4;                                                        \
596     }
597 
598 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
599 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
600 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
601 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
602 
603 /* protected mode interrupt */
604 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
605                                    int error_code, unsigned int next_eip,
606                                    int is_hw)
607 {
608     SegmentCache *dt;
609     target_ulong ptr, ssp;
610     int type, dpl, selector, ss_dpl, cpl;
611     int has_error_code, new_stack, shift;
612     uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
613     uint32_t old_eip, sp_mask;
614     int vm86 = env->eflags & VM_MASK;
615 
616     has_error_code = 0;
617     if (!is_int && !is_hw) {
618         has_error_code = exception_has_error_code(intno);
619     }
620     if (is_int) {
621         old_eip = next_eip;
622     } else {
623         old_eip = env->eip;
624     }
625 
626     dt = &env->idt;
627     if (intno * 8 + 7 > dt->limit) {
628         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
629     }
630     ptr = dt->base + intno * 8;
631     e1 = cpu_ldl_kernel(env, ptr);
632     e2 = cpu_ldl_kernel(env, ptr + 4);
633     /* check gate type */
634     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
635     switch (type) {
636     case 5: /* task gate */
637         /* this check must be done here to return the correct error code */
638         if (!(e2 & DESC_P_MASK)) {
639             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
640         }
641         switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
642         if (has_error_code) {
643             int type;
644             uint32_t mask;
645 
646             /* push the error code */
647             type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
648             shift = type >> 3;
649             if (env->segs[R_SS].flags & DESC_B_MASK) {
650                 mask = 0xffffffff;
651             } else {
652                 mask = 0xffff;
653             }
654             esp = (env->regs[R_ESP] - (2 << shift)) & mask;
655             ssp = env->segs[R_SS].base + esp;
656             if (shift) {
657                 cpu_stl_kernel(env, ssp, error_code);
658             } else {
659                 cpu_stw_kernel(env, ssp, error_code);
660             }
661             SET_ESP(esp, mask);
662         }
663         return;
664     case 6: /* 286 interrupt gate */
665     case 7: /* 286 trap gate */
666     case 14: /* 386 interrupt gate */
667     case 15: /* 386 trap gate */
668         break;
669     default:
670         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
671         break;
672     }
673     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
674     cpl = env->hflags & HF_CPL_MASK;
675     /* check privilege if software int */
676     if (is_int && dpl < cpl) {
677         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
678     }
679     /* check valid bit */
680     if (!(e2 & DESC_P_MASK)) {
681         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
682     }
683     selector = e1 >> 16;
684     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
685     if ((selector & 0xfffc) == 0) {
686         raise_exception_err(env, EXCP0D_GPF, 0);
687     }
688     if (load_segment(env, &e1, &e2, selector) != 0) {
689         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
690     }
691     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
692         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
693     }
694     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
695     if (dpl > cpl) {
696         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
697     }
698     if (!(e2 & DESC_P_MASK)) {
699         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
700     }
701     if (e2 & DESC_C_MASK) {
702         dpl = cpl;
703     }
704     if (dpl < cpl) {
705         /* to inner privilege */
706         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
707         if ((ss & 0xfffc) == 0) {
708             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
709         }
710         if ((ss & 3) != dpl) {
711             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
712         }
713         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
714             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
715         }
716         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
717         if (ss_dpl != dpl) {
718             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719         }
720         if (!(ss_e2 & DESC_S_MASK) ||
721             (ss_e2 & DESC_CS_MASK) ||
722             !(ss_e2 & DESC_W_MASK)) {
723             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
724         }
725         if (!(ss_e2 & DESC_P_MASK)) {
726             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
727         }
728         new_stack = 1;
729         sp_mask = get_sp_mask(ss_e2);
730         ssp = get_seg_base(ss_e1, ss_e2);
731     } else  {
732         /* to same privilege */
733         if (vm86) {
734             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
735         }
736         new_stack = 0;
737         sp_mask = get_sp_mask(env->segs[R_SS].flags);
738         ssp = env->segs[R_SS].base;
739         esp = env->regs[R_ESP];
740     }
741 
742     shift = type >> 3;
743 
744 #if 0
745     /* XXX: check that enough room is available */
746     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
747     if (vm86) {
748         push_size += 8;
749     }
750     push_size <<= shift;
751 #endif
752     if (shift == 1) {
753         if (new_stack) {
754             if (vm86) {
755                 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
756                 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
757                 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
758                 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
759             }
760             PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
761             PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
762         }
763         PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
764         PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
765         PUSHL(ssp, esp, sp_mask, old_eip);
766         if (has_error_code) {
767             PUSHL(ssp, esp, sp_mask, error_code);
768         }
769     } else {
770         if (new_stack) {
771             if (vm86) {
772                 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
773                 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
774                 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
775                 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
776             }
777             PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
778             PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
779         }
780         PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
781         PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
782         PUSHW(ssp, esp, sp_mask, old_eip);
783         if (has_error_code) {
784             PUSHW(ssp, esp, sp_mask, error_code);
785         }
786     }
787 
788     /* interrupt gates clear the IF flag */
789     if ((type & 1) == 0) {
790         env->eflags &= ~IF_MASK;
791     }
792     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
793 
794     if (new_stack) {
795         if (vm86) {
796             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
797             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
798             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
799             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
800         }
801         ss = (ss & ~3) | dpl;
802         cpu_x86_load_seg_cache(env, R_SS, ss,
803                                ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
804     }
805     SET_ESP(esp, sp_mask);
806 
807     selector = (selector & ~3) | dpl;
808     cpu_x86_load_seg_cache(env, R_CS, selector,
809                    get_seg_base(e1, e2),
810                    get_seg_limit(e1, e2),
811                    e2);
812     env->eip = offset;
813 }
814 
815 #ifdef TARGET_X86_64
816 
817 #define PUSHQ_RA(sp, val, ra)                   \
818     {                                           \
819         sp -= 8;                                \
820         cpu_stq_kernel_ra(env, sp, (val), ra);  \
821     }
822 
823 #define POPQ_RA(sp, val, ra)                    \
824     {                                           \
825         val = cpu_ldq_kernel_ra(env, sp, ra);   \
826         sp += 8;                                \
827     }
828 
829 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
830 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
831 
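/* Fetch a stack pointer from the 64-bit TSS: RSP0-RSP2 live at offsets
   4/12/20 and IST1-IST7 start at offset 36, so 'level' is either the
   target DPL or ist + 3. */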
832 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
833 {
834     X86CPU *cpu = env_archcpu(env);
835     int index;
836 
837 #if 0
838     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
839            env->tr.base, env->tr.limit);
840 #endif
841 
842     if (!(env->tr.flags & DESC_P_MASK)) {
843         cpu_abort(CPU(cpu), "invalid tss");
844     }
845     index = 8 * level + 4;
846     if ((index + 7) > env->tr.limit) {
847         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
848     }
849     return cpu_ldq_kernel(env, env->tr.base + index);
850 }
851 
852 /* 64 bit interrupt */
853 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
854                            int error_code, target_ulong next_eip, int is_hw)
855 {
856     SegmentCache *dt;
857     target_ulong ptr;
858     int type, dpl, selector, cpl, ist;
859     int has_error_code, new_stack;
860     uint32_t e1, e2, e3, ss;
861     target_ulong old_eip, esp, offset;
862 
863     has_error_code = 0;
864     if (!is_int && !is_hw) {
865         has_error_code = exception_has_error_code(intno);
866     }
867     if (is_int) {
868         old_eip = next_eip;
869     } else {
870         old_eip = env->eip;
871     }
872 
873     dt = &env->idt;
874     if (intno * 16 + 15 > dt->limit) {
875         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
876     }
877     ptr = dt->base + intno * 16;
878     e1 = cpu_ldl_kernel(env, ptr);
879     e2 = cpu_ldl_kernel(env, ptr + 4);
880     e3 = cpu_ldl_kernel(env, ptr + 8);
881     /* check gate type */
882     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
883     switch (type) {
884     case 14: /* 386 interrupt gate */
885     case 15: /* 386 trap gate */
886         break;
887     default:
888         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
889         break;
890     }
891     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
892     cpl = env->hflags & HF_CPL_MASK;
893     /* check privilege if software int */
894     if (is_int && dpl < cpl) {
895         raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
896     }
897     /* check valid bit */
898     if (!(e2 & DESC_P_MASK)) {
899         raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
900     }
901     selector = e1 >> 16;
902     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
903     ist = e2 & 7;
904     if ((selector & 0xfffc) == 0) {
905         raise_exception_err(env, EXCP0D_GPF, 0);
906     }
907 
908     if (load_segment(env, &e1, &e2, selector) != 0) {
909         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
910     }
911     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
912         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
913     }
914     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
915     if (dpl > cpl) {
916         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
917     }
918     if (!(e2 & DESC_P_MASK)) {
919         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
920     }
921     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
922         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
923     }
924     if (e2 & DESC_C_MASK) {
925         dpl = cpl;
926     }
927     if (dpl < cpl || ist != 0) {
928         /* to inner privilege */
929         new_stack = 1;
930         esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
931         ss = 0;
932     } else {
933         /* to same privilege */
934         if (env->eflags & VM_MASK) {
935             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
936         }
937         new_stack = 0;
938         esp = env->regs[R_ESP];
939     }
940     esp &= ~0xfLL; /* align stack */
941 
942     PUSHQ(esp, env->segs[R_SS].selector);
943     PUSHQ(esp, env->regs[R_ESP]);
944     PUSHQ(esp, cpu_compute_eflags(env));
945     PUSHQ(esp, env->segs[R_CS].selector);
946     PUSHQ(esp, old_eip);
947     if (has_error_code) {
948         PUSHQ(esp, error_code);
949     }
950 
951     /* interrupt gates clear the IF flag */
952     if ((type & 1) == 0) {
953         env->eflags &= ~IF_MASK;
954     }
955     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
956 
957     if (new_stack) {
958         ss = 0 | dpl;
959         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
960     }
961     env->regs[R_ESP] = esp;
962 
963     selector = (selector & ~3) | dpl;
964     cpu_x86_load_seg_cache(env, R_CS, selector,
965                    get_seg_base(e1, e2),
966                    get_seg_limit(e1, e2),
967                    e2);
968     env->eip = offset;
969 }
970 #endif
971 
972 #ifdef TARGET_X86_64
973 #if defined(CONFIG_USER_ONLY)
974 void helper_syscall(CPUX86State *env, int next_eip_addend)
975 {
976     CPUState *cs = env_cpu(env);
977 
978     cs->exception_index = EXCP_SYSCALL;
979     env->exception_is_int = 0;
980     env->exception_next_eip = env->eip + next_eip_addend;
981     cpu_loop_exit(cs);
982 }
983 #else
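/* SYSCALL: the target CS selector comes from STAR[47:32] and SS from
   CS + 8; in long mode RIP is loaded from LSTAR (64-bit code) or CSTAR
   (compatibility mode) and RFLAGS is masked with SFMASK. */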
984 void helper_syscall(CPUX86State *env, int next_eip_addend)
985 {
986     int selector;
987 
988     if (!(env->efer & MSR_EFER_SCE)) {
989         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
990     }
991     selector = (env->star >> 32) & 0xffff;
992     if (env->hflags & HF_LMA_MASK) {
993         int code64;
994 
995         env->regs[R_ECX] = env->eip + next_eip_addend;
996         env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
997 
998         code64 = env->hflags & HF_CS64_MASK;
999 
1000         env->eflags &= ~(env->fmask | RF_MASK);
1001         cpu_load_eflags(env, env->eflags, 0);
1002         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1003                            0, 0xffffffff,
1004                                DESC_G_MASK | DESC_P_MASK |
1005                                DESC_S_MASK |
1006                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1007                                DESC_L_MASK);
1008         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1009                                0, 0xffffffff,
1010                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1011                                DESC_S_MASK |
1012                                DESC_W_MASK | DESC_A_MASK);
1013         if (code64) {
1014             env->eip = env->lstar;
1015         } else {
1016             env->eip = env->cstar;
1017         }
1018     } else {
1019         env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1020 
1021         env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1022         cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1023                            0, 0xffffffff,
1024                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1025                                DESC_S_MASK |
1026                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1027         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1028                                0, 0xffffffff,
1029                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1030                                DESC_S_MASK |
1031                                DESC_W_MASK | DESC_A_MASK);
1032         env->eip = (uint32_t)env->star;
1033     }
1034 }
1035 #endif
1036 #endif
1037 
1038 #ifdef TARGET_X86_64
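/* SYSRET: the return CS selector comes from STAR[63:48] (plus 16 when
   returning to 64-bit mode) and SS from STAR[63:48] + 8; in long mode
   RFLAGS is restored from R11 and RIP from RCX. */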
1039 void helper_sysret(CPUX86State *env, int dflag)
1040 {
1041     int cpl, selector;
1042 
1043     if (!(env->efer & MSR_EFER_SCE)) {
1044         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1045     }
1046     cpl = env->hflags & HF_CPL_MASK;
1047     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1048         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1049     }
1050     selector = (env->star >> 48) & 0xffff;
1051     if (env->hflags & HF_LMA_MASK) {
1052         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1053                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1054                         NT_MASK);
1055         if (dflag == 2) {
1056             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1057                                    0, 0xffffffff,
1058                                    DESC_G_MASK | DESC_P_MASK |
1059                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1060                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1061                                    DESC_L_MASK);
1062             env->eip = env->regs[R_ECX];
1063         } else {
1064             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1065                                    0, 0xffffffff,
1066                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1067                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1068                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1069             env->eip = (uint32_t)env->regs[R_ECX];
1070         }
1071         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1072                                0, 0xffffffff,
1073                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1074                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1075                                DESC_W_MASK | DESC_A_MASK);
1076     } else {
1077         env->eflags |= IF_MASK;
1078         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1079                                0, 0xffffffff,
1080                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1081                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1082                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1083         env->eip = (uint32_t)env->regs[R_ECX];
1084         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1085                                0, 0xffffffff,
1086                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1087                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1088                                DESC_W_MASK | DESC_A_MASK);
1089     }
1090 }
1091 #endif
1092 
1093 /* real mode interrupt */
1094 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1095                               int error_code, unsigned int next_eip)
1096 {
1097     SegmentCache *dt;
1098     target_ulong ptr, ssp;
1099     int selector;
1100     uint32_t offset, esp;
1101     uint32_t old_cs, old_eip;
1102 
1103     /* real mode (simpler!) */
1104     dt = &env->idt;
1105     if (intno * 4 + 3 > dt->limit) {
1106         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1107     }
1108     ptr = dt->base + intno * 4;
1109     offset = cpu_lduw_kernel(env, ptr);
1110     selector = cpu_lduw_kernel(env, ptr + 2);
1111     esp = env->regs[R_ESP];
1112     ssp = env->segs[R_SS].base;
1113     if (is_int) {
1114         old_eip = next_eip;
1115     } else {
1116         old_eip = env->eip;
1117     }
1118     old_cs = env->segs[R_CS].selector;
1119     /* XXX: use SS segment size? */
1120     PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1121     PUSHW(ssp, esp, 0xffff, old_cs);
1122     PUSHW(ssp, esp, 0xffff, old_eip);
1123 
1124     /* update processor state */
1125     env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1126     env->eip = offset;
1127     env->segs[R_CS].selector = selector;
1128     env->segs[R_CS].base = (selector << 4);
1129     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1130 }
1131 
1132 #if defined(CONFIG_USER_ONLY)
1133 /* fake user mode interrupt. is_int is TRUE if coming from the int
1134  * instruction. next_eip is the env->eip value AFTER the interrupt
1135  * instruction. It is only relevant if is_int is TRUE or if intno
1136  * is EXCP_SYSCALL.
1137  */
1138 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1139                               int error_code, target_ulong next_eip)
1140 {
1141     if (is_int) {
1142         SegmentCache *dt;
1143         target_ulong ptr;
1144         int dpl, cpl, shift;
1145         uint32_t e2;
1146 
1147         dt = &env->idt;
1148         if (env->hflags & HF_LMA_MASK) {
1149             shift = 4;
1150         } else {
1151             shift = 3;
1152         }
1153         ptr = dt->base + (intno << shift);
1154         e2 = cpu_ldl_kernel(env, ptr + 4);
1155 
1156         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1157         cpl = env->hflags & HF_CPL_MASK;
1158         /* check privilege if software int */
1159         if (dpl < cpl) {
1160             raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1161         }
1162     }
1163 
1164     /* Since we emulate only user space, we cannot do more than exit
1165        the emulation with the appropriate exception and error
1166        code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1167     if (is_int || intno == EXCP_SYSCALL) {
1168         env->eip = next_eip;
1169     }
1170 }
1171 
1172 #else
1173 
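/* While running an SVM guest, record the event being delivered in the
   VMCB event_inj field, unless an injection is already pending there. */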
1174 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1175                             int error_code, int is_hw, int rm)
1176 {
1177     CPUState *cs = env_cpu(env);
1178     uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1179                                                           control.event_inj));
1180 
1181     if (!(event_inj & SVM_EVTINJ_VALID)) {
1182         int type;
1183 
1184         if (is_int) {
1185             type = SVM_EVTINJ_TYPE_SOFT;
1186         } else {
1187             type = SVM_EVTINJ_TYPE_EXEPT;
1188         }
1189         event_inj = intno | type | SVM_EVTINJ_VALID;
1190         if (!rm && exception_has_error_code(intno)) {
1191             event_inj |= SVM_EVTINJ_VALID_ERR;
1192             x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1193                                              control.event_inj_err),
1194                      error_code);
1195         }
1196         x86_stl_phys(cs,
1197                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1198                  event_inj);
1199     }
1200 }
1201 #endif
1202 
1203 /*
1204  * Begin execution of an interrupt. is_int is TRUE if coming from
1205  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1206  * instruction. It is only relevant if is_int is TRUE.
1207  */
1208 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1209                              int error_code, target_ulong next_eip, int is_hw)
1210 {
1211     CPUX86State *env = &cpu->env;
1212 
1213     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1214         if ((env->cr[0] & CR0_PE_MASK)) {
1215             static int count;
1216 
1217             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1218                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1219                      count, intno, error_code, is_int,
1220                      env->hflags & HF_CPL_MASK,
1221                      env->segs[R_CS].selector, env->eip,
1222                      (int)env->segs[R_CS].base + env->eip,
1223                      env->segs[R_SS].selector, env->regs[R_ESP]);
1224             if (intno == 0x0e) {
1225                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1226             } else {
1227                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1228             }
1229             qemu_log("\n");
1230             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1231 #if 0
1232             {
1233                 int i;
1234                 target_ulong ptr;
1235 
1236                 qemu_log("       code=");
1237                 ptr = env->segs[R_CS].base + env->eip;
1238                 for (i = 0; i < 16; i++) {
1239                     qemu_log(" %02x", ldub(ptr + i));
1240                 }
1241                 qemu_log("\n");
1242             }
1243 #endif
1244             count++;
1245         }
1246     }
1247     if (env->cr[0] & CR0_PE_MASK) {
1248 #if !defined(CONFIG_USER_ONLY)
1249         if (env->hflags & HF_GUEST_MASK) {
1250             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1251         }
1252 #endif
1253 #ifdef TARGET_X86_64
1254         if (env->hflags & HF_LMA_MASK) {
1255             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1256         } else
1257 #endif
1258         {
1259             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1260                                    is_hw);
1261         }
1262     } else {
1263 #if !defined(CONFIG_USER_ONLY)
1264         if (env->hflags & HF_GUEST_MASK) {
1265             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1266         }
1267 #endif
1268         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1269     }
1270 
1271 #if !defined(CONFIG_USER_ONLY)
1272     if (env->hflags & HF_GUEST_MASK) {
1273         CPUState *cs = CPU(cpu);
1274         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1275                                       offsetof(struct vmcb,
1276                                                control.event_inj));
1277 
1278         x86_stl_phys(cs,
1279                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1280                  event_inj & ~SVM_EVTINJ_VALID);
1281     }
1282 #endif
1283 }
1284 
1285 void x86_cpu_do_interrupt(CPUState *cs)
1286 {
1287     X86CPU *cpu = X86_CPU(cs);
1288     CPUX86State *env = &cpu->env;
1289 
1290 #if defined(CONFIG_USER_ONLY)
1291     /* In user-mode-only emulation, we simulate a fake exception
1292        that will be handled outside the CPU execution
1293        loop */
1294     do_interrupt_user(env, cs->exception_index,
1295                       env->exception_is_int,
1296                       env->error_code,
1297                       env->exception_next_eip);
1298     /* successfully delivered */
1299     env->old_exception = -1;
1300 #else
1301     if (cs->exception_index >= EXCP_VMEXIT) {
1302         assert(env->old_exception == -1);
1303         do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
1304     } else {
1305         do_interrupt_all(cpu, cs->exception_index,
1306                          env->exception_is_int,
1307                          env->error_code,
1308                          env->exception_next_eip, 0);
1309         /* successfully delivered */
1310         env->old_exception = -1;
1311     }
1312 #endif
1313 }
1314 
1315 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1316 {
1317     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1318 }
1319 
1320 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1321 {
1322     X86CPU *cpu = X86_CPU(cs);
1323     CPUX86State *env = &cpu->env;
1324     int intno;
1325 
1326     interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
1327     if (!interrupt_request) {
1328         return false;
1329     }
1330 
1331     /* Don't process multiple interrupt requests in a single call.
1332      * This is required to make icount-driven execution deterministic.
1333      */
1334     switch (interrupt_request) {
1335 #if !defined(CONFIG_USER_ONLY)
1336     case CPU_INTERRUPT_POLL:
1337         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1338         apic_poll_irq(cpu->apic_state);
1339         break;
1340 #endif
1341     case CPU_INTERRUPT_SIPI:
1342         do_cpu_sipi(cpu);
1343         break;
1344     case CPU_INTERRUPT_SMI:
1345         cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
1346         cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1347         do_smm_enter(cpu);
1348         break;
1349     case CPU_INTERRUPT_NMI:
1350         cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
1351         cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1352         env->hflags2 |= HF2_NMI_MASK;
1353         do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1354         break;
1355     case CPU_INTERRUPT_MCE:
1356         cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1357         do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1358         break;
1359     case CPU_INTERRUPT_HARD:
1360         cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
1361         cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1362                                    CPU_INTERRUPT_VIRQ);
1363         intno = cpu_get_pic_interrupt(env);
1364         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1365                       "Servicing hardware INT=0x%02x\n", intno);
1366         do_interrupt_x86_hardirq(env, intno, 1);
1367         break;
1368 #if !defined(CONFIG_USER_ONLY)
1369     case CPU_INTERRUPT_VIRQ:
1370         /* FIXME: this should respect TPR */
1371         cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
1372         intno = x86_ldl_phys(cs, env->vm_vmcb
1373                              + offsetof(struct vmcb, control.int_vector));
1374         qemu_log_mask(CPU_LOG_TB_IN_ASM,
1375                       "Servicing virtual hardware INT=0x%02x\n", intno);
1376         do_interrupt_x86_hardirq(env, intno, 1);
1377         cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1378         break;
1379 #endif
1380     }
1381 
1382     /* Ensure that no TB jump will be modified as the program flow was changed.  */
1383     return true;
1384 }
1385 
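/* LLDT: load the LDT register from a GDT descriptor; in long mode the
   descriptor is 16 bytes and supplies the upper 32 bits of the base. */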
1386 void helper_lldt(CPUX86State *env, int selector)
1387 {
1388     SegmentCache *dt;
1389     uint32_t e1, e2;
1390     int index, entry_limit;
1391     target_ulong ptr;
1392 
1393     selector &= 0xffff;
1394     if ((selector & 0xfffc) == 0) {
1395         /* XXX: NULL selector case: invalid LDT */
1396         env->ldt.base = 0;
1397         env->ldt.limit = 0;
1398     } else {
1399         if (selector & 0x4) {
1400             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1401         }
1402         dt = &env->gdt;
1403         index = selector & ~7;
1404 #ifdef TARGET_X86_64
1405         if (env->hflags & HF_LMA_MASK) {
1406             entry_limit = 15;
1407         } else
1408 #endif
1409         {
1410             entry_limit = 7;
1411         }
1412         if ((index + entry_limit) > dt->limit) {
1413             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1414         }
1415         ptr = dt->base + index;
1416         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1417         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1418         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1419             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1420         }
1421         if (!(e2 & DESC_P_MASK)) {
1422             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1423         }
1424 #ifdef TARGET_X86_64
1425         if (env->hflags & HF_LMA_MASK) {
1426             uint32_t e3;
1427 
1428             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1429             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1430             env->ldt.base |= (target_ulong)e3 << 32;
1431         } else
1432 #endif
1433         {
1434             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1435         }
1436     }
1437     env->ldt.selector = selector;
1438 }
1439 
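/* LTR: load the task register from a GDT descriptor (available TSS,
   type 1 or 9) and mark the descriptor busy. */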
1440 void helper_ltr(CPUX86State *env, int selector)
1441 {
1442     SegmentCache *dt;
1443     uint32_t e1, e2;
1444     int index, type, entry_limit;
1445     target_ulong ptr;
1446 
1447     selector &= 0xffff;
1448     if ((selector & 0xfffc) == 0) {
1449         /* NULL selector case: invalid TR */
1450         env->tr.base = 0;
1451         env->tr.limit = 0;
1452         env->tr.flags = 0;
1453     } else {
1454         if (selector & 0x4) {
1455             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1456         }
1457         dt = &env->gdt;
1458         index = selector & ~7;
1459 #ifdef TARGET_X86_64
1460         if (env->hflags & HF_LMA_MASK) {
1461             entry_limit = 15;
1462         } else
1463 #endif
1464         {
1465             entry_limit = 7;
1466         }
1467         if ((index + entry_limit) > dt->limit) {
1468             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1469         }
1470         ptr = dt->base + index;
1471         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1472         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1473         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1474         if ((e2 & DESC_S_MASK) ||
1475             (type != 1 && type != 9)) {
1476             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1477         }
1478         if (!(e2 & DESC_P_MASK)) {
1479             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1480         }
1481 #ifdef TARGET_X86_64
1482         if (env->hflags & HF_LMA_MASK) {
1483             uint32_t e3, e4;
1484 
1485             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1486             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1487             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1488                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1489             }
1490             load_seg_cache_raw_dt(&env->tr, e1, e2);
1491             env->tr.base |= (target_ulong)e3 << 32;
1492         } else
1493 #endif
1494         {
1495             load_seg_cache_raw_dt(&env->tr, e1, e2);
1496         }
1497         e2 |= DESC_TSS_BUSY_MASK;
1498         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1499     }
1500     env->tr.selector = selector;
1501 }
1502 
1503 /* only works in protected mode and not VM86. seg_reg must be != R_CS */
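     /*
      * A null selector is rejected for SS unless running 64-bit code at
      * CPL != 3; for the other registers it just clears the cached
      * descriptor.  A non-null selector must name a present descriptor of
      * the right type (writable data for SS, readable otherwise) with
      * matching privilege, and its accessed bit is set if it was clear.
      */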
1504 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1505 {
1506     uint32_t e1, e2;
1507     int cpl, dpl, rpl;
1508     SegmentCache *dt;
1509     int index;
1510     target_ulong ptr;
1511 
1512     selector &= 0xffff;
1513     cpl = env->hflags & HF_CPL_MASK;
1514     if ((selector & 0xfffc) == 0) {
1515         /* null selector case */
1516         if (seg_reg == R_SS
1517 #ifdef TARGET_X86_64
1518             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1519 #endif
1520             ) {
1521             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1522         }
1523         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1524     } else {
1525 
1526         if (selector & 0x4) {
1527             dt = &env->ldt;
1528         } else {
1529             dt = &env->gdt;
1530         }
1531         index = selector & ~7;
1532         if ((index + 7) > dt->limit) {
1533             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1534         }
1535         ptr = dt->base + index;
1536         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1537         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1538 
1539         if (!(e2 & DESC_S_MASK)) {
1540             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1541         }
1542         rpl = selector & 3;
1543         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1544         if (seg_reg == R_SS) {
1545             /* must be writable segment */
1546             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1547                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1548             }
1549             if (rpl != cpl || dpl != cpl) {
1550                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1551             }
1552         } else {
1553             /* must be readable segment */
1554             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1555                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1556             }
1557 
1558             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1559                 /* if not conforming code, test rights */
1560                 if (dpl < cpl || dpl < rpl) {
1561                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1562                 }
1563             }
1564         }
1565 
1566         if (!(e2 & DESC_P_MASK)) {
1567             if (seg_reg == R_SS) {
1568                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1569             } else {
1570                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1571             }
1572         }
1573 
1574         /* set the access bit if not already set */
1575         if (!(e2 & DESC_A_MASK)) {
1576             e2 |= DESC_A_MASK;
1577             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1578         }
1579 
1580         cpu_x86_load_seg_cache(env, seg_reg, selector,
1581                        get_seg_base(e1, e2),
1582                        get_seg_limit(e1, e2),
1583                        e2);
1584 #if 0
1585         qemu_log("load_seg: sel=0x%04x base=0x" TARGET_FMT_lx " limit=0x%08x flags=%08x\n",
1586                  selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2);
1587 #endif
1588     }
1589 }
1590 
1591 /* protected mode jump */
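     /*
      * A far jump either targets a code segment directly (conforming /
      * non-conforming privilege rules, then CS:EIP is reloaded) or goes
      * through a system descriptor: TSS and task-gate types trigger a task
      * switch, call gates redirect to the code segment and offset stored
      * in the gate.  In long mode only 64-bit call gates (type 12) are
      * allowed, and their upper 8 bytes provide bits 63..32 of the target.
      */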
1592 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1593                            target_ulong next_eip)
1594 {
1595     int gate_cs, type;
1596     uint32_t e1, e2, cpl, dpl, rpl, limit;
1597 
1598     if ((new_cs & 0xfffc) == 0) {
1599         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1600     }
1601     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1602         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1603     }
1604     cpl = env->hflags & HF_CPL_MASK;
1605     if (e2 & DESC_S_MASK) {
1606         if (!(e2 & DESC_CS_MASK)) {
1607             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1608         }
1609         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1610         if (e2 & DESC_C_MASK) {
1611             /* conforming code segment */
1612             if (dpl > cpl) {
1613                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1614             }
1615         } else {
1616             /* non-conforming code segment */
1617             rpl = new_cs & 3;
1618             if (rpl > cpl) {
1619                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1620             }
1621             if (dpl != cpl) {
1622                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1623             }
1624         }
1625         if (!(e2 & DESC_P_MASK)) {
1626             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1627         }
1628         limit = get_seg_limit(e1, e2);
1629         if (new_eip > limit &&
1630             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1631             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1632         }
1633         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1634                        get_seg_base(e1, e2), limit, e2);
1635         env->eip = new_eip;
1636     } else {
1637         /* jump to call or task gate */
1638         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1639         rpl = new_cs & 3;
1640         cpl = env->hflags & HF_CPL_MASK;
1641         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1642 
1643 #ifdef TARGET_X86_64
1644         if (env->efer & MSR_EFER_LMA) {
1645             if (type != 12) {
1646                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1647             }
1648         }
1649 #endif
1650         switch (type) {
1651         case 1: /* 286 TSS */
1652         case 9: /* 386 TSS */
1653         case 5: /* task gate */
1654             if (dpl < cpl || dpl < rpl) {
1655                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1656             }
1657             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1658             break;
1659         case 4: /* 286 call gate */
1660         case 12: /* 386 call gate */
1661             if ((dpl < cpl) || (dpl < rpl)) {
1662                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1663             }
1664             if (!(e2 & DESC_P_MASK)) {
1665                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1666             }
1667             gate_cs = e1 >> 16;
1668             new_eip = (e1 & 0xffff);
1669             if (type == 12) {
1670                 new_eip |= (e2 & 0xffff0000);
1671             }
1672 
1673 #ifdef TARGET_X86_64
1674             if (env->efer & MSR_EFER_LMA) {
1675                 /* load the upper 8 bytes of the 64-bit call gate */
1676                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1677                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1678                                            GETPC());
1679                 }
1680                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1681                 if (type != 0) {
1682                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1683                                            GETPC());
1684                 }
1685                 new_eip |= ((target_ulong)e1) << 32;
1686             }
1687 #endif
1688 
1689             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1690                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1691             }
1692             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1693             /* must be code segment */
1694             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1695                  (DESC_S_MASK | DESC_CS_MASK))) {
1696                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1697             }
1698             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1699                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1700                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1701             }
1702 #ifdef TARGET_X86_64
1703             if (env->efer & MSR_EFER_LMA) {
1704                 if (!(e2 & DESC_L_MASK)) {
1705                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1706                 }
1707                 if (e2 & DESC_B_MASK) {
1708                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1709                 }
1710             }
1711 #endif
1712             if (!(e2 & DESC_P_MASK)) {
1713                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1714             }
1715             limit = get_seg_limit(e1, e2);
1716             if (new_eip > limit &&
1717                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1718                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1719             }
1720             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1721                                    get_seg_base(e1, e2), limit, e2);
1722             env->eip = new_eip;
1723             break;
1724         default:
1725             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1726             break;
1727         }
1728     }
1729 }
1730 
1731 /* real mode call */
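     /*
      * Pushes the return CS:IP (32- or 16-bit according to 'shift') and
      * reloads CS the real-mode way, with base = selector << 4.
      */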
1732 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1733                        int shift, int next_eip)
1734 {
1735     int new_eip;
1736     uint32_t esp, esp_mask;
1737     target_ulong ssp;
1738 
1739     new_eip = new_eip1;
1740     esp = env->regs[R_ESP];
1741     esp_mask = get_sp_mask(env->segs[R_SS].flags);
1742     ssp = env->segs[R_SS].base;
1743     if (shift) {
1744         PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1745         PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1746     } else {
1747         PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1748         PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1749     }
1750 
1751     SET_ESP(esp, esp_mask);
1752     env->eip = new_eip;
1753     env->segs[R_CS].selector = new_cs;
1754     env->segs[R_CS].base = (new_cs << 4);
1755 }
1756 
1757 /* protected mode call */
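     /*
      * A direct far call pushes the return address on the current stack
      * and reloads CS:EIP.  A call through a call gate may raise the
      * privilege level: the new SS:ESP is fetched from the TSS, the old
      * SS:ESP is pushed on the new stack, and for legacy gates up to 31
      * parameters are copied across (64-bit gates carry no parameters).
      * TSS and task-gate descriptors trigger a task switch instead.
      */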
1758 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1759                             int shift, target_ulong next_eip)
1760 {
1761     int new_stack, i;
1762     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1763     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1764     uint32_t val, limit, old_sp_mask;
1765     target_ulong ssp, old_ssp, offset, sp;
1766 
1767     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1768     LOG_PCALL_STATE(env_cpu(env));
1769     if ((new_cs & 0xfffc) == 0) {
1770         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1771     }
1772     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1773         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1774     }
1775     cpl = env->hflags & HF_CPL_MASK;
1776     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1777     if (e2 & DESC_S_MASK) {
1778         if (!(e2 & DESC_CS_MASK)) {
1779             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1780         }
1781         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1782         if (e2 & DESC_C_MASK) {
1783             /* conforming code segment */
1784             if (dpl > cpl) {
1785                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1786             }
1787         } else {
1788             /* non-conforming code segment */
1789             rpl = new_cs & 3;
1790             if (rpl > cpl) {
1791                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1792             }
1793             if (dpl != cpl) {
1794                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1795             }
1796         }
1797         if (!(e2 & DESC_P_MASK)) {
1798             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1799         }
1800 
1801 #ifdef TARGET_X86_64
1802         /* XXX: check 16/32 bit cases in long mode */
1803         if (shift == 2) {
1804             target_ulong rsp;
1805 
1806             /* 64 bit case */
1807             rsp = env->regs[R_ESP];
1808             PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1809             PUSHQ_RA(rsp, next_eip, GETPC());
1810             /* from this point, not restartable */
1811             env->regs[R_ESP] = rsp;
1812             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1813                                    get_seg_base(e1, e2),
1814                                    get_seg_limit(e1, e2), e2);
1815             env->eip = new_eip;
1816         } else
1817 #endif
1818         {
1819             sp = env->regs[R_ESP];
1820             sp_mask = get_sp_mask(env->segs[R_SS].flags);
1821             ssp = env->segs[R_SS].base;
1822             if (shift) {
1823                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1824                 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1825             } else {
1826                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1827                 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1828             }
1829 
1830             limit = get_seg_limit(e1, e2);
1831             if (new_eip > limit) {
1832                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1833             }
1834             /* from this point, not restartable */
1835             SET_ESP(sp, sp_mask);
1836             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1837                                    get_seg_base(e1, e2), limit, e2);
1838             env->eip = new_eip;
1839         }
1840     } else {
1841         /* check gate type */
1842         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1843         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1844         rpl = new_cs & 3;
1845 
1846 #ifdef TARGET_X86_64
1847         if (env->efer & MSR_EFER_LMA) {
1848             if (type != 12) {
1849                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1850             }
1851         }
1852 #endif
1853 
1854         switch (type) {
1855         case 1: /* available 286 TSS */
1856         case 9: /* available 386 TSS */
1857         case 5: /* task gate */
1858             if (dpl < cpl || dpl < rpl) {
1859                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1860             }
1861             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1862             return;
1863         case 4: /* 286 call gate */
1864         case 12: /* 386 call gate */
1865             break;
1866         default:
1867             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1868             break;
1869         }
1870         shift = type >> 3;
1871 
1872         if (dpl < cpl || dpl < rpl) {
1873             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1874         }
1875         /* check valid bit */
1876         if (!(e2 & DESC_P_MASK)) {
1877             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1878         }
1879         selector = e1 >> 16;
1880         param_count = e2 & 0x1f;
1881         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1882 #ifdef TARGET_X86_64
1883         if (env->efer & MSR_EFER_LMA) {
1884             /* load the upper 8 bytes of the 64-bit call gate */
1885             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1886                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1887                                        GETPC());
1888             }
1889             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1890             if (type != 0) {
1891                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1892                                        GETPC());
1893             }
1894             offset |= ((target_ulong)e1) << 32;
1895         }
1896 #endif
1897         if ((selector & 0xfffc) == 0) {
1898             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1899         }
1900 
1901         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1902             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1903         }
1904         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1905             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1906         }
1907         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1908         if (dpl > cpl) {
1909             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1910         }
1911 #ifdef TARGET_X86_64
1912         if (env->efer & MSR_EFER_LMA) {
1913             if (!(e2 & DESC_L_MASK)) {
1914                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1915             }
1916             if (e2 & DESC_B_MASK) {
1917                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1918             }
1919             shift++;
1920         }
1921 #endif
1922         if (!(e2 & DESC_P_MASK)) {
1923             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1924         }
1925 
1926         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1927             /* to inner privilege */
1928 #ifdef TARGET_X86_64
1929             if (shift == 2) {
1930                 sp = get_rsp_from_tss(env, dpl);
1931                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1932                 new_stack = 1;
1933                 sp_mask = 0;
1934                 ssp = 0;  /* SS base is always zero in IA-32e mode */
1935                 LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1936                           TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1937             } else
1938 #endif
1939             {
1940                 uint32_t sp32;
1941                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1942                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1943                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1944                           env->regs[R_ESP]);
1945                 sp = sp32;
1946                 if ((ss & 0xfffc) == 0) {
1947                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1948                 }
1949                 if ((ss & 3) != dpl) {
1950                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1951                 }
1952                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1953                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1954                 }
1955                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1956                 if (ss_dpl != dpl) {
1957                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1958                 }
1959                 if (!(ss_e2 & DESC_S_MASK) ||
1960                     (ss_e2 & DESC_CS_MASK) ||
1961                     !(ss_e2 & DESC_W_MASK)) {
1962                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1963                 }
1964                 if (!(ss_e2 & DESC_P_MASK)) {
1965                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1966                 }
1967 
1968                 sp_mask = get_sp_mask(ss_e2);
1969                 ssp = get_seg_base(ss_e1, ss_e2);
1970             }
1971 
1972             /* push_size = ((param_count * 2) + 8) << shift; */
1973 
1974             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1975             old_ssp = env->segs[R_SS].base;
1976 #ifdef TARGET_X86_64
1977             if (shift == 2) {
1978                 /* XXX: verify if new stack address is canonical */
1979                 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
1980                 PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
1981                 /* parameters aren't supported for 64-bit call gates */
1982             } else
1983 #endif
1984             if (shift == 1) {
1985                 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1986                 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1987                 for (i = param_count - 1; i >= 0; i--) {
1988                     val = cpu_ldl_kernel_ra(env, old_ssp +
1989                                             ((env->regs[R_ESP] + i * 4) &
1990                                              old_sp_mask), GETPC());
1991                     PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1992                 }
1993             } else {
1994                 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1995                 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1996                 for (i = param_count - 1; i >= 0; i--) {
1997                     val = cpu_lduw_kernel_ra(env, old_ssp +
1998                                              ((env->regs[R_ESP] + i * 2) &
1999                                               old_sp_mask), GETPC());
2000                     PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
2001                 }
2002             }
2003             new_stack = 1;
2004         } else {
2005             /* to same privilege */
2006             sp = env->regs[R_ESP];
2007             sp_mask = get_sp_mask(env->segs[R_SS].flags);
2008             ssp = env->segs[R_SS].base;
2009             /* push_size = (4 << shift); */
2010             new_stack = 0;
2011         }
2012 
2013 #ifdef TARGET_X86_64
2014         if (shift == 2) {
2015             PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
2016             PUSHQ_RA(sp, next_eip, GETPC());
2017         } else
2018 #endif
2019         if (shift == 1) {
2020             PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2021             PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
2022         } else {
2023             PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2024             PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
2025         }
2026 
2027         /* from this point, not restartable */
2028 
2029         if (new_stack) {
2030 #ifdef TARGET_X86_64
2031             if (shift == 2) {
2032                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
2033             } else
2034 #endif
2035             {
2036                 ss = (ss & ~3) | dpl;
2037                 cpu_x86_load_seg_cache(env, R_SS, ss,
2038                                        ssp,
2039                                        get_seg_limit(ss_e1, ss_e2),
2040                                        ss_e2);
2041             }
2042         }
2043 
2044         selector = (selector & ~3) | dpl;
2045         cpu_x86_load_seg_cache(env, R_CS, selector,
2046                        get_seg_base(e1, e2),
2047                        get_seg_limit(e1, e2),
2048                        e2);
2049         SET_ESP(sp, sp_mask);
2050         env->eip = offset;
2051     }
2052 }
2053 
2054 /* real and vm86 mode iret */
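     /*
      * Pops IP, CS and FLAGS (32- or 16-bit according to 'shift') using a
      * 16-bit stack pointer; in VM86 mode the IOPL bits are not restored.
      */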
2055 void helper_iret_real(CPUX86State *env, int shift)
2056 {
2057     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2058     target_ulong ssp;
2059     int eflags_mask;
2060 
2061     sp_mask = 0xffff; /* XXX: use SS segment size? */
2062     sp = env->regs[R_ESP];
2063     ssp = env->segs[R_SS].base;
2064     if (shift == 1) {
2065         /* 32 bits */
2066         POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2067         POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2068         new_cs &= 0xffff;
2069         POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2070     } else {
2071         /* 16 bits */
2072         POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2073         POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2074         POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2075     }
2076     env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2077     env->segs[R_CS].selector = new_cs;
2078     env->segs[R_CS].base = (new_cs << 4);
2079     env->eip = new_eip;
2080     if (env->eflags & VM_MASK) {
2081         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2082             NT_MASK;
2083     } else {
2084         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2085             RF_MASK | NT_MASK;
2086     }
2087     if (shift == 0) {
2088         eflags_mask &= 0xffff;
2089     }
2090     cpu_load_eflags(env, new_eflags, eflags_mask);
2091     env->hflags2 &= ~HF2_NMI_MASK;
2092 }
2093 
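     /*
      * After a return to an outer privilege level, a data or non-conforming
      * code segment whose DPL is below the new CPL is no longer usable:
      * its selector is nulled and the cached present bit cleared.  FS and
      * GS holding a null selector are left untouched (see the XXX below).
      */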
2094 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2095 {
2096     int dpl;
2097     uint32_t e2;
2098 
2099     /* XXX: on x86_64, we do not want to nullify FS and GS because
2100        they may still contain a valid base. I would be interested to
2101        know how a real x86_64 CPU behaves */
2102     if ((seg_reg == R_FS || seg_reg == R_GS) &&
2103         (env->segs[seg_reg].selector & 0xfffc) == 0) {
2104         return;
2105     }
2106 
2107     e2 = env->segs[seg_reg].flags;
2108     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2109     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2110         /* data or non-conforming code segment */
2111         if (dpl < cpl) {
2112             cpu_x86_load_seg_cache(env, seg_reg, 0,
2113                                    env->segs[seg_reg].base,
2114                                    env->segs[seg_reg].limit,
2115                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
2116         }
2117     }
2118 }
2119 
2120 /* protected mode lret and iret */
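     /*
      * Common code for far RET and IRET.  'shift' selects the operand size
      * (0: 16-bit, 1: 32-bit, 2: 64-bit), 'addend' is the byte count a
      * RET n discards from the stack, and for IRET the popped EFLAGS may
      * switch back to VM86 mode.  A return to an outer privilege level
      * also pops SS:ESP and revalidates the data segment registers.
      */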
2121 static inline void helper_ret_protected(CPUX86State *env, int shift,
2122                                         int is_iret, int addend,
2123                                         uintptr_t retaddr)
2124 {
2125     uint32_t new_cs, new_eflags, new_ss;
2126     uint32_t new_es, new_ds, new_fs, new_gs;
2127     uint32_t e1, e2, ss_e1, ss_e2;
2128     int cpl, dpl, rpl, eflags_mask, iopl;
2129     target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2130 
2131 #ifdef TARGET_X86_64
2132     if (shift == 2) {
2133         sp_mask = -1;
2134     } else
2135 #endif
2136     {
2137         sp_mask = get_sp_mask(env->segs[R_SS].flags);
2138     }
2139     sp = env->regs[R_ESP];
2140     ssp = env->segs[R_SS].base;
2141     new_eflags = 0; /* avoid warning */
2142 #ifdef TARGET_X86_64
2143     if (shift == 2) {
2144         POPQ_RA(sp, new_eip, retaddr);
2145         POPQ_RA(sp, new_cs, retaddr);
2146         new_cs &= 0xffff;
2147         if (is_iret) {
2148             POPQ_RA(sp, new_eflags, retaddr);
2149         }
2150     } else
2151 #endif
2152     {
2153         if (shift == 1) {
2154             /* 32 bits */
2155             POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2156             POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2157             new_cs &= 0xffff;
2158             if (is_iret) {
2159                 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2160                 if (new_eflags & VM_MASK) {
2161                     goto return_to_vm86;
2162                 }
2163             }
2164         } else {
2165             /* 16 bits */
2166             POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2167             POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2168             if (is_iret) {
2169                 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2170             }
2171         }
2172     }
2173     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2174               new_cs, new_eip, shift, addend);
2175     LOG_PCALL_STATE(env_cpu(env));
2176     if ((new_cs & 0xfffc) == 0) {
2177         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2178     }
2179     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2180         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2181     }
2182     if (!(e2 & DESC_S_MASK) ||
2183         !(e2 & DESC_CS_MASK)) {
2184         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2185     }
2186     cpl = env->hflags & HF_CPL_MASK;
2187     rpl = new_cs & 3;
2188     if (rpl < cpl) {
2189         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2190     }
2191     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2192     if (e2 & DESC_C_MASK) {
2193         if (dpl > rpl) {
2194             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2195         }
2196     } else {
2197         if (dpl != rpl) {
2198             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2199         }
2200     }
2201     if (!(e2 & DESC_P_MASK)) {
2202         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2203     }
2204 
2205     sp += addend;
2206     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2207                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2208         /* return to same privilege level */
2209         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2210                        get_seg_base(e1, e2),
2211                        get_seg_limit(e1, e2),
2212                        e2);
2213     } else {
2214         /* return to different privilege level */
2215 #ifdef TARGET_X86_64
2216         if (shift == 2) {
2217             POPQ_RA(sp, new_esp, retaddr);
2218             POPQ_RA(sp, new_ss, retaddr);
2219             new_ss &= 0xffff;
2220         } else
2221 #endif
2222         {
2223             if (shift == 1) {
2224                 /* 32 bits */
2225                 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2226                 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2227                 new_ss &= 0xffff;
2228             } else {
2229                 /* 16 bits */
2230                 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2231                 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2232             }
2233         }
2234         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2235                   new_ss, new_esp);
2236         if ((new_ss & 0xfffc) == 0) {
2237 #ifdef TARGET_X86_64
2238             /* NULL ss is allowed in long mode if cpl != 3 */
2239             /* XXX: test CS64? */
2240             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2241                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2242                                        0, 0xffffffff,
2243                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2244                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2245                                        DESC_W_MASK | DESC_A_MASK);
2246                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2247             } else
2248 #endif
2249             {
2250                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2251             }
2252         } else {
2253             if ((new_ss & 3) != rpl) {
2254                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2255             }
2256             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2257                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2258             }
2259             if (!(ss_e2 & DESC_S_MASK) ||
2260                 (ss_e2 & DESC_CS_MASK) ||
2261                 !(ss_e2 & DESC_W_MASK)) {
2262                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2263             }
2264             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2265             if (dpl != rpl) {
2266                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2267             }
2268             if (!(ss_e2 & DESC_P_MASK)) {
2269                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2270             }
2271             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2272                                    get_seg_base(ss_e1, ss_e2),
2273                                    get_seg_limit(ss_e1, ss_e2),
2274                                    ss_e2);
2275         }
2276 
2277         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2278                        get_seg_base(e1, e2),
2279                        get_seg_limit(e1, e2),
2280                        e2);
2281         sp = new_esp;
2282 #ifdef TARGET_X86_64
2283         if (env->hflags & HF_CS64_MASK) {
2284             sp_mask = -1;
2285         } else
2286 #endif
2287         {
2288             sp_mask = get_sp_mask(ss_e2);
2289         }
2290 
2291         /* validate data segments */
2292         validate_seg(env, R_ES, rpl);
2293         validate_seg(env, R_DS, rpl);
2294         validate_seg(env, R_FS, rpl);
2295         validate_seg(env, R_GS, rpl);
2296 
2297         sp += addend;
2298     }
2299     SET_ESP(sp, sp_mask);
2300     env->eip = new_eip;
2301     if (is_iret) {
2302         /* NOTE: 'cpl' is the _old_ CPL */
2303         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2304         if (cpl == 0) {
2305             eflags_mask |= IOPL_MASK;
2306         }
2307         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2308         if (cpl <= iopl) {
2309             eflags_mask |= IF_MASK;
2310         }
2311         if (shift == 0) {
2312             eflags_mask &= 0xffff;
2313         }
2314         cpu_load_eflags(env, new_eflags, eflags_mask);
2315     }
2316     return;
2317 
2318  return_to_vm86:
2319     POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2320     POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2321     POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2322     POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2323     POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2324     POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2325 
2326     /* modify processor state */
2327     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2328                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2329                     VIP_MASK);
2330     load_seg_vm(env, R_CS, new_cs & 0xffff);
2331     load_seg_vm(env, R_SS, new_ss & 0xffff);
2332     load_seg_vm(env, R_ES, new_es & 0xffff);
2333     load_seg_vm(env, R_DS, new_ds & 0xffff);
2334     load_seg_vm(env, R_FS, new_fs & 0xffff);
2335     load_seg_vm(env, R_GS, new_gs & 0xffff);
2336 
2337     env->eip = new_eip & 0xffff;
2338     env->regs[R_ESP] = new_esp;
2339 }
2340 
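     /*
      * IRET in protected mode.  If EFLAGS.NT is set (illegal in long mode)
      * the return is a task switch through the back-link selector stored
      * at offset 0 of the current TSS; otherwise it is a normal
      * stack-based return.  NMI blocking is cleared in either case.
      */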
2341 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2342 {
2343     int tss_selector, type;
2344     uint32_t e1, e2;
2345 
2346     /* specific case for TSS */
2347     if (env->eflags & NT_MASK) {
2348 #ifdef TARGET_X86_64
2349         if (env->hflags & HF_LMA_MASK) {
2350             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2351         }
2352 #endif
2353         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2354         if (tss_selector & 4) {
2355             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2356         }
2357         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2358             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2359         }
2360         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2361         /* NOTE: 0x17 includes the S bit; only a busy TSS (any size) passes */
2362         if (type != 3) {
2363             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2364         }
2365         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2366     } else {
2367         helper_ret_protected(env, shift, 1, 0, GETPC());
2368     }
2369     env->hflags2 &= ~HF2_NMI_MASK;
2370 }
2371 
2372 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2373 {
2374     helper_ret_protected(env, shift, 0, addend, GETPC());
2375 }
2376 
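     /*
      * SYSENTER: fast transition to CPL 0.  CS and SS are loaded as flat
      * segments derived from the SYSENTER_CS MSR (SS selector = CS + 8),
      * VM/IF/RF are cleared, and ESP/EIP come from the SYSENTER_ESP and
      * SYSENTER_EIP MSRs.  In long mode CS is loaded as a 64-bit segment.
      */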
2377 void helper_sysenter(CPUX86State *env)
2378 {
2379     if (env->sysenter_cs == 0) {
2380         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2381     }
2382     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2383 
2384 #ifdef TARGET_X86_64
2385     if (env->hflags & HF_LMA_MASK) {
2386         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2387                                0, 0xffffffff,
2388                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2389                                DESC_S_MASK |
2390                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2391                                DESC_L_MASK);
2392     } else
2393 #endif
2394     {
2395         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2396                                0, 0xffffffff,
2397                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2398                                DESC_S_MASK |
2399                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2400     }
2401     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2402                            0, 0xffffffff,
2403                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2404                            DESC_S_MASK |
2405                            DESC_W_MASK | DESC_A_MASK);
2406     env->regs[R_ESP] = env->sysenter_esp;
2407     env->eip = env->sysenter_eip;
2408 }
2409 
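     /*
      * SYSEXIT: return to CPL 3 (only legal from CPL 0).  CS and SS are
      * loaded as flat user segments at fixed offsets from SYSENTER_CS
      * (+16/+24, or +32/+40 for a 64-bit return), with ESP and EIP taken
      * from ECX and EDX.
      */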
2410 void helper_sysexit(CPUX86State *env, int dflag)
2411 {
2412     int cpl;
2413 
2414     cpl = env->hflags & HF_CPL_MASK;
2415     if (env->sysenter_cs == 0 || cpl != 0) {
2416         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2417     }
2418 #ifdef TARGET_X86_64
2419     if (dflag == 2) {
2420         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2421                                3, 0, 0xffffffff,
2422                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2423                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2424                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2425                                DESC_L_MASK);
2426         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2427                                3, 0, 0xffffffff,
2428                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2429                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2430                                DESC_W_MASK | DESC_A_MASK);
2431     } else
2432 #endif
2433     {
2434         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2435                                3, 0, 0xffffffff,
2436                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2437                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2438                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2439         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2440                                3, 0, 0xffffffff,
2441                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2442                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2443                                DESC_W_MASK | DESC_A_MASK);
2444     }
2445     env->regs[R_ESP] = env->regs[R_ECX];
2446     env->eip = env->regs[R_EDX];
2447 }
2448 
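     /*
      * LSL and LAR read a descriptor without loading it: on success ZF is
      * set and the segment limit (LSL) or the access-rights bytes masked
      * with 0x00f0ff00 (LAR) are returned; any failed check clears ZF and
      * returns 0.  Only a subset of system descriptor types is accepted.
      */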
2449 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2450 {
2451     unsigned int limit;
2452     uint32_t e1, e2, eflags, selector;
2453     int rpl, dpl, cpl, type;
2454 
2455     selector = selector1 & 0xffff;
2456     eflags = cpu_cc_compute_all(env, CC_OP);
2457     if ((selector & 0xfffc) == 0) {
2458         goto fail;
2459     }
2460     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2461         goto fail;
2462     }
2463     rpl = selector & 3;
2464     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2465     cpl = env->hflags & HF_CPL_MASK;
2466     if (e2 & DESC_S_MASK) {
2467         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2468             /* conforming */
2469         } else {
2470             if (dpl < cpl || dpl < rpl) {
2471                 goto fail;
2472             }
2473         }
2474     } else {
2475         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2476         switch (type) {
2477         case 1:
2478         case 2:
2479         case 3:
2480         case 9:
2481         case 11:
2482             break;
2483         default:
2484             goto fail;
2485         }
2486         if (dpl < cpl || dpl < rpl) {
2487         fail:
2488             CC_SRC = eflags & ~CC_Z;
2489             return 0;
2490         }
2491     }
2492     limit = get_seg_limit(e1, e2);
2493     CC_SRC = eflags | CC_Z;
2494     return limit;
2495 }
2496 
2497 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2498 {
2499     uint32_t e1, e2, eflags, selector;
2500     int rpl, dpl, cpl, type;
2501 
2502     selector = selector1 & 0xffff;
2503     eflags = cpu_cc_compute_all(env, CC_OP);
2504     if ((selector & 0xfffc) == 0) {
2505         goto fail;
2506     }
2507     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2508         goto fail;
2509     }
2510     rpl = selector & 3;
2511     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2512     cpl = env->hflags & HF_CPL_MASK;
2513     if (e2 & DESC_S_MASK) {
2514         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2515             /* conforming */
2516         } else {
2517             if (dpl < cpl || dpl < rpl) {
2518                 goto fail;
2519             }
2520         }
2521     } else {
2522         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2523         switch (type) {
2524         case 1:
2525         case 2:
2526         case 3:
2527         case 4:
2528         case 5:
2529         case 9:
2530         case 11:
2531         case 12:
2532             break;
2533         default:
2534             goto fail;
2535         }
2536         if (dpl < cpl || dpl < rpl) {
2537         fail:
2538             CC_SRC = eflags & ~CC_Z;
2539             return 0;
2540         }
2541     }
2542     CC_SRC = eflags | CC_Z;
2543     return e2 & 0x00f0ff00;
2544 }
2545 
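     /*
      * VERR and VERW set ZF if the selected segment would be readable or
      * writable, respectively, at the current CPL and RPL, and clear it
      * otherwise; no exception is raised for an unusable selector.
      */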
2546 void helper_verr(CPUX86State *env, target_ulong selector1)
2547 {
2548     uint32_t e1, e2, eflags, selector;
2549     int rpl, dpl, cpl;
2550 
2551     selector = selector1 & 0xffff;
2552     eflags = cpu_cc_compute_all(env, CC_OP);
2553     if ((selector & 0xfffc) == 0) {
2554         goto fail;
2555     }
2556     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2557         goto fail;
2558     }
2559     if (!(e2 & DESC_S_MASK)) {
2560         goto fail;
2561     }
2562     rpl = selector & 3;
2563     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2564     cpl = env->hflags & HF_CPL_MASK;
2565     if (e2 & DESC_CS_MASK) {
2566         if (!(e2 & DESC_R_MASK)) {
2567             goto fail;
2568         }
2569         if (!(e2 & DESC_C_MASK)) {
2570             if (dpl < cpl || dpl < rpl) {
2571                 goto fail;
2572             }
2573         }
2574     } else {
2575         if (dpl < cpl || dpl < rpl) {
2576         fail:
2577             CC_SRC = eflags & ~CC_Z;
2578             return;
2579         }
2580     }
2581     CC_SRC = eflags | CC_Z;
2582 }
2583 
2584 void helper_verw(CPUX86State *env, target_ulong selector1)
2585 {
2586     uint32_t e1, e2, eflags, selector;
2587     int rpl, dpl, cpl;
2588 
2589     selector = selector1 & 0xffff;
2590     eflags = cpu_cc_compute_all(env, CC_OP);
2591     if ((selector & 0xfffc) == 0) {
2592         goto fail;
2593     }
2594     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2595         goto fail;
2596     }
2597     if (!(e2 & DESC_S_MASK)) {
2598         goto fail;
2599     }
2600     rpl = selector & 3;
2601     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2602     cpl = env->hflags & HF_CPL_MASK;
2603     if (e2 & DESC_CS_MASK) {
2604         goto fail;
2605     } else {
2606         if (dpl < cpl || dpl < rpl) {
2607             goto fail;
2608         }
2609         if (!(e2 & DESC_W_MASK)) {
2610         fail:
2611             CC_SRC = eflags & ~CC_Z;
2612             return;
2613         }
2614     }
2615     CC_SRC = eflags | CC_Z;
2616 }
2617 
2618 #if defined(CONFIG_USER_ONLY)
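     /*
      * User-mode helper: in real or VM86 mode segment registers are loaded
      * without descriptor checks (base = selector << 4); otherwise the
      * full protected-mode load is performed.
      */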
2619 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2620 {
2621     if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2622         int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2623         selector &= 0xffff;
2624         cpu_x86_load_seg_cache(env, seg_reg, selector,
2625                                (selector << 4), 0xffff,
2626                                DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2627                                DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2628     } else {
2629         helper_load_seg(env, seg_reg, selector);
2630     }
2631 }
2632 #endif
2633 
2634 /* check if Port I/O is allowed in TSS */
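     /*
      * The 32-bit TSS may contain an I/O permission bitmap whose 16-bit
      * offset is stored at byte 0x66 of the TSS.  Each bit covers one
      * port; two bytes are read because an access of up to four ports can
      * straddle a byte boundary, and every covered bit must be clear.
      */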
2635 static inline void check_io(CPUX86State *env, int addr, int size,
2636                             uintptr_t retaddr)
2637 {
2638     int io_offset, val, mask;
2639 
2640     /* TSS must be a valid, present 32-bit TSS */
2641     if (!(env->tr.flags & DESC_P_MASK) ||
2642         ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2643         env->tr.limit < 103) {
2644         goto fail;
2645     }
2646     io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2647     io_offset += (addr >> 3);
2648     /* Note: the check reads two bytes, as it may cross a byte boundary */
2649     if ((io_offset + 1) > env->tr.limit) {
2650         goto fail;
2651     }
2652     val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2653     val >>= (addr & 7);
2654     mask = (1 << size) - 1;
2655     /* all bits must be zero to allow the I/O */
2656     if ((val & mask) != 0) {
2657     fail:
2658         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2659     }
2660 }
2661 
2662 void helper_check_iob(CPUX86State *env, uint32_t t0)
2663 {
2664     check_io(env, t0, 1, GETPC());
2665 }
2666 
2667 void helper_check_iow(CPUX86State *env, uint32_t t0)
2668 {
2669     check_io(env, t0, 2, GETPC());
2670 }
2671 
2672 void helper_check_iol(CPUX86State *env, uint32_t t0)
2673 {
2674     check_io(env, t0, 4, GETPC());
2675 }
2676