xref: /qemu/target/i386/tcg/seg_helper.c (revision bde8adb8)
1 /*
2  *  x86 segmentation related helpers:
3  *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  *  Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
28 #include "helper-tcg.h"
29 #include "seg_helper.h"
30 #include "access.h"
31 
32 #ifdef TARGET_X86_64
33 #define SET_ESP(val, sp_mask)                                   \
34     do {                                                        \
35         if ((sp_mask) == 0xffff) {                              \
36             env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
37                 ((val) & 0xffff);                               \
38         } else if ((sp_mask) == 0xffffffffLL) {                 \
39             env->regs[R_ESP] = (uint32_t)(val);                 \
40         } else {                                                \
41             env->regs[R_ESP] = (val);                           \
42         }                                                       \
43     } while (0)
44 #else
45 #define SET_ESP(val, sp_mask)                                   \
46     do {                                                        \
47         env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
48             ((val) & (sp_mask));                                \
49     } while (0)
50 #endif
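/*
 * SET_ESP only writes the bits of ESP/RSP selected by sp_mask, so a
 * 16-bit stack segment updates SP without disturbing the upper bits of
 * ESP; on 64-bit targets a 32-bit mask zero-extends the value and any
 * wider mask stores it whole.
 */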
51 
52 /* XXX: use mmu_index to have proper DPL support */
53 typedef struct StackAccess
54 {
55     CPUX86State *env;
56     uintptr_t ra;
57     target_ulong ss_base;
58     target_ulong sp;
59     target_ulong sp_mask;
60     int mmu_index;
61 } StackAccess;
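/*
 * The 16/32-bit push/pop helpers below address the stack as
 * ss_base + (sp & sp_mask), modelling how a 16-bit or 32-bit stack
 * segment wraps.  A minimal usage sketch, assuming the caller has
 * already filled in the StackAccess:
 *
 *     pushl(&sa, error_code);
 *     SET_ESP(sa.sp, sa.sp_mask);   (commit the new stack pointer)
 */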
62 
63 static void pushw(StackAccess *sa, uint16_t val)
64 {
65     sa->sp -= 2;
66     cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
67                       val, sa->mmu_index, sa->ra);
68 }
69 
70 static void pushl(StackAccess *sa, uint32_t val)
71 {
72     sa->sp -= 4;
73     cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
74                       val, sa->mmu_index, sa->ra);
75 }
76 
77 static uint16_t popw(StackAccess *sa)
78 {
79     uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
80                                       sa->ss_base + (sa->sp & sa->sp_mask),
81                                       sa->mmu_index, sa->ra);
82     sa->sp += 2;
83     return ret;
84 }
85 
86 static uint32_t popl(StackAccess *sa)
87 {
88     uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
89                                      sa->ss_base + (sa->sp & sa->sp_mask),
90                                      sa->mmu_index, sa->ra);
91     sa->sp += 4;
92     return ret;
93 }
94 
95 int get_pg_mode(CPUX86State *env)
96 {
97     int pg_mode = 0;
98     if (!(env->cr[0] & CR0_PG_MASK)) {
99         return 0;
100     }
101     if (env->cr[0] & CR0_WP_MASK) {
102         pg_mode |= PG_MODE_WP;
103     }
104     if (env->cr[4] & CR4_PAE_MASK) {
105         pg_mode |= PG_MODE_PAE;
106         if (env->efer & MSR_EFER_NXE) {
107             pg_mode |= PG_MODE_NXE;
108         }
109     }
110     if (env->cr[4] & CR4_PSE_MASK) {
111         pg_mode |= PG_MODE_PSE;
112     }
113     if (env->cr[4] & CR4_SMEP_MASK) {
114         pg_mode |= PG_MODE_SMEP;
115     }
116     if (env->hflags & HF_LMA_MASK) {
117         pg_mode |= PG_MODE_LMA;
118         if (env->cr[4] & CR4_PKE_MASK) {
119             pg_mode |= PG_MODE_PKE;
120         }
121         if (env->cr[4] & CR4_PKS_MASK) {
122             pg_mode |= PG_MODE_PKS;
123         }
124         if (env->cr[4] & CR4_LA57_MASK) {
125             pg_mode |= PG_MODE_LA57;
126         }
127     }
128     return pg_mode;
129 }
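/*
 * Example (informal): a 64-bit guest running with CR0.WP=1, CR4.PAE=1
 * and EFER.NXE=1 would get PG_MODE_WP | PG_MODE_PAE | PG_MODE_NXE |
 * PG_MODE_LMA, with PG_MODE_PKE/PKS/LA57 added only if the matching
 * CR4 bits are set.
 */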
130 
131 /* return non-zero on error */
132 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
133                                uint32_t *e2_ptr, int selector,
134                                uintptr_t retaddr)
135 {
136     SegmentCache *dt;
137     int index;
138     target_ulong ptr;
139 
140     if (selector & 0x4) {
141         dt = &env->ldt;
142     } else {
143         dt = &env->gdt;
144     }
145     index = selector & ~7;
146     if ((index + 7) > dt->limit) {
147         return -1;
148     }
149     ptr = dt->base + index;
150     *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
151     *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
152     return 0;
153 }
154 
155 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
156                                uint32_t *e2_ptr, int selector)
157 {
158     return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
159 }
160 
161 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
162 {
163     unsigned int limit;
164 
165     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
166     if (e2 & DESC_G_MASK) {
167         limit = (limit << 12) | 0xfff;
168     }
169     return limit;
170 }
171 
172 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
173 {
174     return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
175 }
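/*
 * e1/e2 are the two 32-bit words of a legacy segment descriptor:
 * e1 holds limit[15:0] and base[15:0], e2 holds base[23:16], the access
 * and flag bits, limit[19:16] and base[31:24].  When DESC_G_MASK is set
 * the limit is in 4K granularity, hence the "<< 12 | 0xfff" above.
 */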
176 
177 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
178                                          uint32_t e2)
179 {
180     sc->base = get_seg_base(e1, e2);
181     sc->limit = get_seg_limit(e1, e2);
182     sc->flags = e2;
183 }
184 
185 /* init the segment cache in vm86 mode. */
186 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
187 {
188     selector &= 0xffff;
189 
190     cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
191                            DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
192                            DESC_A_MASK | (3 << DESC_DPL_SHIFT));
193 }
194 
195 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
196                                        uint32_t *esp_ptr, int dpl,
197                                        uintptr_t retaddr)
198 {
199     X86CPU *cpu = env_archcpu(env);
200     int type, index, shift;
201 
202 #if 0
203     {
204         int i;
205         printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
206         for (i = 0; i < env->tr.limit; i++) {
207             printf("%02x ", env->tr.base[i]);
208             if ((i & 7) == 7) {
209                 printf("\n");
210             }
211         }
212         printf("\n");
213     }
214 #endif
215 
216     if (!(env->tr.flags & DESC_P_MASK)) {
217         cpu_abort(CPU(cpu), "invalid tss");
218     }
219     type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
220     if ((type & 7) != 1) {
221         cpu_abort(CPU(cpu), "invalid tss type");
222     }
223     shift = type >> 3;
224     index = (dpl * 4 + 2) << shift;
225     if (index + (4 << shift) - 1 > env->tr.limit) {
226         raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
227     }
228     if (shift == 0) {
229         *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
230         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
231     } else {
232         *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
233         *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
234     }
235 }
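/*
 * For a 32-bit TSS (shift == 1) the inner-level stack pointers live at
 * offset 4 + 8 * dpl (ESP0/ESP1/ESP2) with the SS selector 4 bytes
 * after; for a 16-bit TSS they are packed as 2-byte fields starting at
 * offset 2, which is what the (dpl * 4 + 2) << shift index computes.
 */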
236 
237 static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
238                          int cpl, uintptr_t retaddr)
239 {
240     uint32_t e1, e2;
241     int rpl, dpl;
242 
243     if ((selector & 0xfffc) != 0) {
244         if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
245             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
246         }
247         if (!(e2 & DESC_S_MASK)) {
248             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
249         }
250         rpl = selector & 3;
251         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
252         if (seg_reg == R_CS) {
253             if (!(e2 & DESC_CS_MASK)) {
254                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
255             }
256             if (dpl != rpl) {
257                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
258             }
259         } else if (seg_reg == R_SS) {
260             /* SS must be writable data */
261             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
262                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
263             }
264             if (dpl != cpl || dpl != rpl) {
265                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
266             }
267         } else {
268             /* not readable code */
269             if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
270                 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
271             }
272             /* if data or non-conforming code, check the rights */
273             if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
274                 if (dpl < cpl || dpl < rpl) {
275                     raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
276                 }
277             }
278         }
279         if (!(e2 & DESC_P_MASK)) {
280             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
281         }
282         cpu_x86_load_seg_cache(env, seg_reg, selector,
283                                get_seg_base(e1, e2),
284                                get_seg_limit(e1, e2),
285                                e2);
286     } else {
287         if (seg_reg == R_SS || seg_reg == R_CS) {
288             raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
289         }
290     }
291 }
292 
293 static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
294                          uintptr_t retaddr)
295 {
296     target_ulong ptr = env->gdt.base + (tss_selector & ~7);
297     uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
298 
299     if (value) {
300         e2 |= DESC_TSS_BUSY_MASK;
301     } else {
302         e2 &= ~DESC_TSS_BUSY_MASK;
303     }
304 
305     cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
306 }
307 
308 #define SWITCH_TSS_JMP  0
309 #define SWITCH_TSS_IRET 1
310 #define SWITCH_TSS_CALL 2
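/*
 * The source of a task switch determines the busy-bit and NT handling
 * in switch_tss_ra() below: JMP clears the old TSS's busy bit and sets
 * the new one; CALL leaves the old one busy, sets the new one, stores a
 * back link and sets NT; IRET clears the old busy bit and requires the
 * new TSS to be busy already.
 */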
311 
312 /* return 0 if switching to a 16-bit TSS, 1 for a 32-bit TSS */
313 static int switch_tss_ra(CPUX86State *env, int tss_selector,
314                          uint32_t e1, uint32_t e2, int source,
315                          uint32_t next_eip, uintptr_t retaddr)
316 {
317     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
318     target_ulong tss_base;
319     uint32_t new_regs[8], new_segs[6];
320     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
321     uint32_t old_eflags, eflags_mask;
322     SegmentCache *dt;
323     int mmu_index, index;
324     target_ulong ptr;
325     X86Access old, new;
326 
327     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
328     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
329               source);
330 
331     /* if task gate, we read the TSS segment and we load it */
332     if (type == 5) {
333         if (!(e2 & DESC_P_MASK)) {
334             raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
335         }
336         tss_selector = e1 >> 16;
337         if (tss_selector & 4) {
338             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
339         }
340         if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
341             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
342         }
343         if (e2 & DESC_S_MASK) {
344             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
345         }
346         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
347         if ((type & 7) != 1) {
348             raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
349         }
350     }
351 
352     if (!(e2 & DESC_P_MASK)) {
353         raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
354     }
355 
356     if (type & 8) {
357         tss_limit_max = 103;
358     } else {
359         tss_limit_max = 43;
360     }
361     tss_limit = get_seg_limit(e1, e2);
362     tss_base = get_seg_base(e1, e2);
363     if ((tss_selector & 4) != 0 ||
364         tss_limit < tss_limit_max) {
365         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
366     }
367     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
368     if (old_type & 8) {
369         old_tss_limit_max = 103;
370     } else {
371         old_tss_limit_max = 43;
372     }
373 
374     /* new TSS must be busy iff the source is an IRET instruction  */
375     if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
376         raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
377     }
378 
379     /* X86Access avoids memory exceptions during the task switch */
380     mmu_index = cpu_mmu_index_kernel(env);
381     access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max,
382                        MMU_DATA_STORE, mmu_index, retaddr);
383 
384     if (source == SWITCH_TSS_CALL) {
385         /* Probe for future write of parent task */
386         probe_access(env, tss_base, 2, MMU_DATA_STORE,
387                      mmu_index, retaddr);
388     }
389     access_prepare_mmu(&new, env, tss_base, tss_limit,
390                        MMU_DATA_LOAD, mmu_index, retaddr);
391 
392     /* save the current state in the old TSS */
393     old_eflags = cpu_compute_eflags(env);
394     if (old_type & 8) {
395         /* 32 bit */
396         access_stl(&old, env->tr.base + 0x20, next_eip);
397         access_stl(&old, env->tr.base + 0x24, old_eflags);
398         access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
399         access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
400         access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
401         access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
402         access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
403         access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
404         access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
405         access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
406         for (i = 0; i < 6; i++) {
407             access_stw(&old, env->tr.base + (0x48 + i * 4),
408                        env->segs[i].selector);
409         }
410     } else {
411         /* 16 bit */
412         access_stw(&old, env->tr.base + 0x0e, next_eip);
413         access_stw(&old, env->tr.base + 0x10, old_eflags);
414         access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
415         access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
416         access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
417         access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
418         access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
419         access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
420         access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
421         access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
422         for (i = 0; i < 4; i++) {
423             access_stw(&old, env->tr.base + (0x22 + i * 2),
424                        env->segs[i].selector);
425         }
426     }
427 
428     /* read all the registers from the new TSS */
429     if (type & 8) {
430         /* 32 bit */
431         new_cr3 = access_ldl(&new, tss_base + 0x1c);
432         new_eip = access_ldl(&new, tss_base + 0x20);
433         new_eflags = access_ldl(&new, tss_base + 0x24);
434         for (i = 0; i < 8; i++) {
435             new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
436         }
437         for (i = 0; i < 6; i++) {
438             new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
439         }
440         new_ldt = access_ldw(&new, tss_base + 0x60);
441         new_trap = access_ldl(&new, tss_base + 0x64);
442     } else {
443         /* 16 bit */
444         new_cr3 = 0;
445         new_eip = access_ldw(&new, tss_base + 0x0e);
446         new_eflags = access_ldw(&new, tss_base + 0x10);
447         for (i = 0; i < 8; i++) {
448             new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
449         }
450         for (i = 0; i < 4; i++) {
451             new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
452         }
453         new_ldt = access_ldw(&new, tss_base + 0x2a);
454         new_segs[R_FS] = 0;
455         new_segs[R_GS] = 0;
456         new_trap = 0;
457     }
458     /* XXX: avoid a compiler warning, see
459      http://support.amd.com/us/Processor_TechDocs/24593.pdf
460      chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
461     (void)new_trap;
462 
463     /* clear busy bit (it is restartable) */
464     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
465         tss_set_busy(env, env->tr.selector, 0, retaddr);
466     }
467 
468     if (source == SWITCH_TSS_IRET) {
469         old_eflags &= ~NT_MASK;
470         if (old_type & 8) {
471             access_stl(&old, env->tr.base + 0x24, old_eflags);
472         } else {
473             access_stw(&old, env->tr.base + 0x10, old_eflags);
474         }
475     }
476 
477     if (source == SWITCH_TSS_CALL) {
478         /*
479          * Thanks to the probe_access above, we know the first two
480          * bytes addressed by &new are writable too.
481          */
482         access_stw(&new, tss_base, env->tr.selector);
483         new_eflags |= NT_MASK;
484     }
485 
486     /* set busy bit */
487     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
488         tss_set_busy(env, tss_selector, 1, retaddr);
489     }
490 
491     /* set the new CPU state */
492 
493     /* now if an exception occurs, it will occur in the next task context */
494 
495     env->cr[0] |= CR0_TS_MASK;
496     env->hflags |= HF_TS_MASK;
497     env->tr.selector = tss_selector;
498     env->tr.base = tss_base;
499     env->tr.limit = tss_limit;
500     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
501 
502     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
503         cpu_x86_update_cr3(env, new_cr3);
504     }
505 
506     /* load all registers without an exception, then reload them with
507        possible exception */
508     env->eip = new_eip;
509     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
510         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
511     if (type & 8) {
512         cpu_load_eflags(env, new_eflags, eflags_mask);
513         for (i = 0; i < 8; i++) {
514             env->regs[i] = new_regs[i];
515         }
516     } else {
517         cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
518         for (i = 0; i < 8; i++) {
519             env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
520         }
521     }
522     if (new_eflags & VM_MASK) {
523         for (i = 0; i < 6; i++) {
524             load_seg_vm(env, i, new_segs[i]);
525         }
526     } else {
527         /* first just selectors as the rest may trigger exceptions */
528         for (i = 0; i < 6; i++) {
529             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
530         }
531     }
532 
533     env->ldt.selector = new_ldt & ~4;
534     env->ldt.base = 0;
535     env->ldt.limit = 0;
536     env->ldt.flags = 0;
537 
538     /* load the LDT */
539     if (new_ldt & 4) {
540         raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
541     }
542 
543     if ((new_ldt & 0xfffc) != 0) {
544         dt = &env->gdt;
545         index = new_ldt & ~7;
546         if ((index + 7) > dt->limit) {
547             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
548         }
549         ptr = dt->base + index;
550         e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
551         e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
552         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
553             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
554         }
555         if (!(e2 & DESC_P_MASK)) {
556             raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
557         }
558         load_seg_cache_raw_dt(&env->ldt, e1, e2);
559     }
560 
561     /* load the segments */
562     if (!(new_eflags & VM_MASK)) {
563         int cpl = new_segs[R_CS] & 3;
564         tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
565         tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
566         tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
567         tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
568         tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
569         tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
570     }
571 
572     /* check that env->eip is in the CS segment limits */
573     if (new_eip > env->segs[R_CS].limit) {
574         /* XXX: different exception if CALL? */
575         raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
576     }
577 
578 #ifndef CONFIG_USER_ONLY
579     /* reset local breakpoints */
580     if (env->dr[7] & DR7_LOCAL_BP_MASK) {
581         cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
582     }
583 #endif
584     return type >> 3;
585 }
586 
587 static int switch_tss(CPUX86State *env, int tss_selector,
588                       uint32_t e1, uint32_t e2, int source,
589                       uint32_t next_eip)
590 {
591     return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
592 }
593 
594 static inline unsigned int get_sp_mask(unsigned int e2)
595 {
596 #ifdef TARGET_X86_64
597     if (e2 & DESC_L_MASK) {
598         return 0;
599     } else
600 #endif
601     if (e2 & DESC_B_MASK) {
602         return 0xffffffff;
603     } else {
604         return 0xffff;
605     }
606 }
607 
608 static int exception_is_fault(int intno)
609 {
610     switch (intno) {
611         /*
612          * #DB can be both fault- and trap-like, but it never sets RF=1
613          * in the RFLAGS value pushed on the stack.
614          */
615     case EXCP01_DB:
616     case EXCP03_INT3:
617     case EXCP04_INTO:
618     case EXCP08_DBLE:
619     case EXCP12_MCHK:
620         return 0;
621     }
622     /* Everything else including reserved exception is a fault.  */
623     return 1;
624 }
625 
626 int exception_has_error_code(int intno)
627 {
628     switch (intno) {
629     case 8:
630     case 10:
631     case 11:
632     case 12:
633     case 13:
634     case 14:
635     case 17:
636         return 1;
637     }
638     return 0;
639 }
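/*
 * The vectors above are #DF (8), #TS (10), #NP (11), #SS (12), #GP (13),
 * #PF (14) and #AC (17), i.e. the exceptions that push an error code.
 */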
640 
641 /* protected mode interrupt */
642 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
643                                    int error_code, unsigned int next_eip,
644                                    int is_hw)
645 {
646     SegmentCache *dt;
647     target_ulong ptr;
648     int type, dpl, selector, ss_dpl, cpl;
649     int has_error_code, new_stack, shift;
650     uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
651     uint32_t old_eip, eflags;
652     int vm86 = env->eflags & VM_MASK;
653     StackAccess sa;
654     bool set_rf;
655 
656     has_error_code = 0;
657     if (!is_int && !is_hw) {
658         has_error_code = exception_has_error_code(intno);
659     }
660     if (is_int) {
661         old_eip = next_eip;
662         set_rf = false;
663     } else {
664         old_eip = env->eip;
665         set_rf = exception_is_fault(intno);
666     }
667 
668     dt = &env->idt;
669     if (intno * 8 + 7 > dt->limit) {
670         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
671     }
672     ptr = dt->base + intno * 8;
673     e1 = cpu_ldl_kernel(env, ptr);
674     e2 = cpu_ldl_kernel(env, ptr + 4);
675     /* check gate type */
676     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
677     switch (type) {
678     case 5: /* task gate */
679     case 6: /* 286 interrupt gate */
680     case 7: /* 286 trap gate */
681     case 14: /* 386 interrupt gate */
682     case 15: /* 386 trap gate */
683         break;
684     default:
685         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
686         break;
687     }
688     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
689     cpl = env->hflags & HF_CPL_MASK;
690     /* check privilege if software int */
691     if (is_int && dpl < cpl) {
692         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
693     }
694 
695     sa.env = env;
696     sa.ra = 0;
697     sa.mmu_index = cpu_mmu_index_kernel(env);
698 
699     if (type == 5) {
700         /* task gate */
701         /* must do that check here to return the correct error code */
702         if (!(e2 & DESC_P_MASK)) {
703             raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
704         }
705         shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
706         if (has_error_code) {
707             /* push the error code */
708             if (env->segs[R_SS].flags & DESC_B_MASK) {
709                 sa.sp_mask = 0xffffffff;
710             } else {
711                 sa.sp_mask = 0xffff;
712             }
713             sa.sp = env->regs[R_ESP];
714             sa.ss_base = env->segs[R_SS].base;
715             if (shift) {
716                 pushl(&sa, error_code);
717             } else {
718                 pushw(&sa, error_code);
719             }
720             SET_ESP(sa.sp, sa.sp_mask);
721         }
722         return;
723     }
724 
725     /* Otherwise, trap or interrupt gate */
726 
727     /* check valid bit */
728     if (!(e2 & DESC_P_MASK)) {
729         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
730     }
731     selector = e1 >> 16;
732     offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
733     if ((selector & 0xfffc) == 0) {
734         raise_exception_err(env, EXCP0D_GPF, 0);
735     }
736     if (load_segment(env, &e1, &e2, selector) != 0) {
737         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
738     }
739     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
740         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
741     }
742     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
743     if (dpl > cpl) {
744         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
745     }
746     if (!(e2 & DESC_P_MASK)) {
747         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
748     }
749     if (e2 & DESC_C_MASK) {
750         dpl = cpl;
751     }
752     if (dpl < cpl) {
753         /* to inner privilege */
754         uint32_t esp;
755         get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
756         if ((ss & 0xfffc) == 0) {
757             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
758         }
759         if ((ss & 3) != dpl) {
760             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
761         }
762         if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
763             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
764         }
765         ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
766         if (ss_dpl != dpl) {
767             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
768         }
769         if (!(ss_e2 & DESC_S_MASK) ||
770             (ss_e2 & DESC_CS_MASK) ||
771             !(ss_e2 & DESC_W_MASK)) {
772             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
773         }
774         if (!(ss_e2 & DESC_P_MASK)) {
775             raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
776         }
777         new_stack = 1;
778         sa.sp = esp;
779         sa.sp_mask = get_sp_mask(ss_e2);
780         sa.ss_base = get_seg_base(ss_e1, ss_e2);
781     } else  {
782         /* to same privilege */
783         if (vm86) {
784             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
785         }
786         new_stack = 0;
787         sa.sp = env->regs[R_ESP];
788         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
789         sa.ss_base = env->segs[R_SS].base;
790     }
791 
792     shift = type >> 3;
793 
794 #if 0
795     /* XXX: check that enough room is available */
796     push_size = 6 + (new_stack << 2) + (has_error_code << 1);
797     if (vm86) {
798         push_size += 8;
799     }
800     push_size <<= shift;
801 #endif
802     eflags = cpu_compute_eflags(env);
803     /*
804      * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
805      * as is.  AMD behavior could be implemented in check_hw_breakpoints().
806      */
807     if (set_rf) {
808         eflags |= RF_MASK;
809     }
810 
811     if (shift == 1) {
812         if (new_stack) {
813             if (vm86) {
814                 pushl(&sa, env->segs[R_GS].selector);
815                 pushl(&sa, env->segs[R_FS].selector);
816                 pushl(&sa, env->segs[R_DS].selector);
817                 pushl(&sa, env->segs[R_ES].selector);
818             }
819             pushl(&sa, env->segs[R_SS].selector);
820             pushl(&sa, env->regs[R_ESP]);
821         }
822         pushl(&sa, eflags);
823         pushl(&sa, env->segs[R_CS].selector);
824         pushl(&sa, old_eip);
825         if (has_error_code) {
826             pushl(&sa, error_code);
827         }
828     } else {
829         if (new_stack) {
830             if (vm86) {
831                 pushw(&sa, env->segs[R_GS].selector);
832                 pushw(&sa, env->segs[R_FS].selector);
833                 pushw(&sa, env->segs[R_DS].selector);
834                 pushw(&sa, env->segs[R_ES].selector);
835             }
836             pushw(&sa, env->segs[R_SS].selector);
837             pushw(&sa, env->regs[R_ESP]);
838         }
839         pushw(&sa, eflags);
840         pushw(&sa, env->segs[R_CS].selector);
841         pushw(&sa, old_eip);
842         if (has_error_code) {
843             pushw(&sa, error_code);
844         }
845     }
846 
847     /* an interrupt gate clears the IF flag */
848     if ((type & 1) == 0) {
849         env->eflags &= ~IF_MASK;
850     }
851     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
852 
853     if (new_stack) {
854         if (vm86) {
855             cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
856             cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
857             cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
858             cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
859         }
860         ss = (ss & ~3) | dpl;
861         cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
862                                get_seg_limit(ss_e1, ss_e2), ss_e2);
863     }
864     SET_ESP(sa.sp, sa.sp_mask);
865 
866     selector = (selector & ~3) | dpl;
867     cpu_x86_load_seg_cache(env, R_CS, selector,
868                    get_seg_base(e1, e2),
869                    get_seg_limit(e1, e2),
870                    e2);
871     env->eip = offset;
872 }
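/*
 * Rough shape of the frame built above for a 32-bit gate, highest
 * address first: GS/FS/DS/ES (vm86 only), SS and ESP (only when
 * switching stacks), then EFLAGS, CS, EIP and finally the error code
 * when the exception defines one.  16-bit gates push the same sequence
 * as word-sized values.
 */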
873 
874 #ifdef TARGET_X86_64
875 
876 static void pushq(StackAccess *sa, uint64_t val)
877 {
878     sa->sp -= 8;
879     cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
880 }
881 
882 static uint64_t popq(StackAccess *sa)
883 {
884     uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
885     sa->sp += 8;
886     return ret;
887 }
888 
889 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
890 {
891     X86CPU *cpu = env_archcpu(env);
892     int index, pg_mode;
893     target_ulong rsp;
894     int32_t sext;
895 
896 #if 0
897     printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
898            env->tr.base, env->tr.limit);
899 #endif
900 
901     if (!(env->tr.flags & DESC_P_MASK)) {
902         cpu_abort(CPU(cpu), "invalid tss");
903     }
904     index = 8 * level + 4;
905     if ((index + 7) > env->tr.limit) {
906         raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
907     }
908 
909     rsp = cpu_ldq_kernel(env, env->tr.base + index);
910 
911     /* test virtual address sign extension */
912     pg_mode = get_pg_mode(env);
913     sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
914     if (sext != 0 && sext != -1) {
915         raise_exception_err(env, EXCP0C_STACK, 0);
916     }
917 
918     return rsp;
919 }
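/*
 * In the 64-bit TSS the qword at offset 4 + 8 * level is RSP0..RSP2 for
 * levels 0-2 and IST1..IST7 for levels 4-10, which is why the caller
 * (do_interrupt64 below) passes "ist + 3" when an IST slot is selected.
 * Non-canonical values are rejected with #SS before the stack is used.
 */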
920 
921 /* 64 bit interrupt */
922 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
923                            int error_code, target_ulong next_eip, int is_hw)
924 {
925     SegmentCache *dt;
926     target_ulong ptr;
927     int type, dpl, selector, cpl, ist;
928     int has_error_code, new_stack;
929     uint32_t e1, e2, e3, eflags;
930     target_ulong old_eip, offset;
931     bool set_rf;
932     StackAccess sa;
933 
934     has_error_code = 0;
935     if (!is_int && !is_hw) {
936         has_error_code = exception_has_error_code(intno);
937     }
938     if (is_int) {
939         old_eip = next_eip;
940         set_rf = false;
941     } else {
942         old_eip = env->eip;
943         set_rf = exception_is_fault(intno);
944     }
945 
946     dt = &env->idt;
947     if (intno * 16 + 15 > dt->limit) {
948         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
949     }
950     ptr = dt->base + intno * 16;
951     e1 = cpu_ldl_kernel(env, ptr);
952     e2 = cpu_ldl_kernel(env, ptr + 4);
953     e3 = cpu_ldl_kernel(env, ptr + 8);
954     /* check gate type */
955     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
956     switch (type) {
957     case 14: /* 386 interrupt gate */
958     case 15: /* 386 trap gate */
959         break;
960     default:
961         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
962         break;
963     }
964     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
965     cpl = env->hflags & HF_CPL_MASK;
966     /* check privilege if software int */
967     if (is_int && dpl < cpl) {
968         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
969     }
970     /* check valid bit */
971     if (!(e2 & DESC_P_MASK)) {
972         raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
973     }
974     selector = e1 >> 16;
975     offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
976     ist = e2 & 7;
977     if ((selector & 0xfffc) == 0) {
978         raise_exception_err(env, EXCP0D_GPF, 0);
979     }
980 
981     if (load_segment(env, &e1, &e2, selector) != 0) {
982         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
983     }
984     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
985         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
986     }
987     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
988     if (dpl > cpl) {
989         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
990     }
991     if (!(e2 & DESC_P_MASK)) {
992         raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
993     }
994     if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
995         raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
996     }
997     if (e2 & DESC_C_MASK) {
998         dpl = cpl;
999     }
1000 
1001     sa.env = env;
1002     sa.ra = 0;
1003     sa.mmu_index = cpu_mmu_index_kernel(env);
1004     sa.sp_mask = -1;
1005     sa.ss_base = 0;
1006     if (dpl < cpl || ist != 0) {
1007         /* to inner privilege */
1008         new_stack = 1;
1009         sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
1010     } else {
1011         /* to same privilege */
1012         if (env->eflags & VM_MASK) {
1013             raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1014         }
1015         new_stack = 0;
1016         sa.sp = env->regs[R_ESP];
1017     }
1018     sa.sp &= ~0xfLL; /* align stack */
1019 
1020     /* See do_interrupt_protected.  */
1021     eflags = cpu_compute_eflags(env);
1022     if (set_rf) {
1023         eflags |= RF_MASK;
1024     }
1025 
1026     pushq(&sa, env->segs[R_SS].selector);
1027     pushq(&sa, env->regs[R_ESP]);
1028     pushq(&sa, eflags);
1029     pushq(&sa, env->segs[R_CS].selector);
1030     pushq(&sa, old_eip);
1031     if (has_error_code) {
1032         pushq(&sa, error_code);
1033     }
1034 
1035     /* an interrupt gate clears the IF flag */
1036     if ((type & 1) == 0) {
1037         env->eflags &= ~IF_MASK;
1038     }
1039     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1040 
1041     if (new_stack) {
1042         uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
1043         cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1044     }
1045     env->regs[R_ESP] = sa.sp;
1046 
1047     selector = (selector & ~3) | dpl;
1048     cpu_x86_load_seg_cache(env, R_CS, selector,
1049                    get_seg_base(e1, e2),
1050                    get_seg_limit(e1, e2),
1051                    e2);
1052     env->eip = offset;
1053 }
1054 #endif /* TARGET_X86_64 */
1055 
1056 void helper_sysret(CPUX86State *env, int dflag)
1057 {
1058     int cpl, selector;
1059 
1060     if (!(env->efer & MSR_EFER_SCE)) {
1061         raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1062     }
1063     cpl = env->hflags & HF_CPL_MASK;
1064     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1065         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1066     }
1067     selector = (env->star >> 48) & 0xffff;
1068 #ifdef TARGET_X86_64
1069     if (env->hflags & HF_LMA_MASK) {
1070         cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1071                         | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1072                         NT_MASK);
1073         if (dflag == 2) {
1074             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1075                                    0, 0xffffffff,
1076                                    DESC_G_MASK | DESC_P_MASK |
1077                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1078                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1079                                    DESC_L_MASK);
1080             env->eip = env->regs[R_ECX];
1081         } else {
1082             cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1083                                    0, 0xffffffff,
1084                                    DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1085                                    DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1086                                    DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1087             env->eip = (uint32_t)env->regs[R_ECX];
1088         }
1089         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1090                                0, 0xffffffff,
1091                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1092                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1093                                DESC_W_MASK | DESC_A_MASK);
1094     } else
1095 #endif
1096     {
1097         env->eflags |= IF_MASK;
1098         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1099                                0, 0xffffffff,
1100                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1101                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1102                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1103         env->eip = (uint32_t)env->regs[R_ECX];
1104         cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1105                                0, 0xffffffff,
1106                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1107                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1108                                DESC_W_MASK | DESC_A_MASK);
1109     }
1110 }
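/*
 * SYSRET above derives the new selectors from MSR_STAR[63:48]: CS is
 * that value (+16 for a 64-bit return) with RPL forced to 3, SS is the
 * value +8, and in long mode EFLAGS is reloaded from R11 while RIP
 * comes from RCX (ECX for a 32-bit return).
 */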
1111 
1112 /* real mode interrupt */
1113 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1114                               int error_code, unsigned int next_eip)
1115 {
1116     SegmentCache *dt;
1117     target_ulong ptr;
1118     int selector;
1119     uint32_t offset;
1120     uint32_t old_cs, old_eip;
1121     StackAccess sa;
1122 
1123     /* real mode (simpler!) */
1124     dt = &env->idt;
1125     if (intno * 4 + 3 > dt->limit) {
1126         raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1127     }
1128     ptr = dt->base + intno * 4;
1129     offset = cpu_lduw_kernel(env, ptr);
1130     selector = cpu_lduw_kernel(env, ptr + 2);
1131 
1132     sa.env = env;
1133     sa.ra = 0;
1134     sa.sp = env->regs[R_ESP];
1135     sa.sp_mask = 0xffff;
1136     sa.ss_base = env->segs[R_SS].base;
1137     sa.mmu_index = cpu_mmu_index_kernel(env);
1138 
1139     if (is_int) {
1140         old_eip = next_eip;
1141     } else {
1142         old_eip = env->eip;
1143     }
1144     old_cs = env->segs[R_CS].selector;
1145     /* XXX: use SS segment size? */
1146     pushw(&sa, cpu_compute_eflags(env));
1147     pushw(&sa, old_cs);
1148     pushw(&sa, old_eip);
1149 
1150     /* update processor state */
1151     SET_ESP(sa.sp, sa.sp_mask);
1152     env->eip = offset;
1153     env->segs[R_CS].selector = selector;
1154     env->segs[R_CS].base = (selector << 4);
1155     env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1156 }
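/*
 * Real mode keeps things simple: the IVT entry for vector N is the
 * 4-byte far pointer at IDT base + N * 4 (offset then selector), and
 * the frame pushed on the 16-bit stack is FLAGS, CS, IP.
 */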
1157 
1158 /*
1159  * Begin execution of an interrupt. is_int is TRUE if coming from
1160  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1161  * instruction. It is only relevant if is_int is TRUE.
1162  */
1163 void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1164                       int error_code, target_ulong next_eip, int is_hw)
1165 {
1166     CPUX86State *env = &cpu->env;
1167 
1168     if (qemu_loglevel_mask(CPU_LOG_INT)) {
1169         if ((env->cr[0] & CR0_PE_MASK)) {
1170             static int count;
1171 
1172             qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1173                      " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1174                      count, intno, error_code, is_int,
1175                      env->hflags & HF_CPL_MASK,
1176                      env->segs[R_CS].selector, env->eip,
1177                      (int)env->segs[R_CS].base + env->eip,
1178                      env->segs[R_SS].selector, env->regs[R_ESP]);
1179             if (intno == 0x0e) {
1180                 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1181             } else {
1182                 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1183             }
1184             qemu_log("\n");
1185             log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1186 #if 0
1187             {
1188                 int i;
1189                 target_ulong ptr;
1190 
1191                 qemu_log("       code=");
1192                 ptr = env->segs[R_CS].base + env->eip;
1193                 for (i = 0; i < 16; i++) {
1194                     qemu_log(" %02x", ldub(ptr + i));
1195                 }
1196                 qemu_log("\n");
1197             }
1198 #endif
1199             count++;
1200         }
1201     }
1202     if (env->cr[0] & CR0_PE_MASK) {
1203 #if !defined(CONFIG_USER_ONLY)
1204         if (env->hflags & HF_GUEST_MASK) {
1205             handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1206         }
1207 #endif
1208 #ifdef TARGET_X86_64
1209         if (env->hflags & HF_LMA_MASK) {
1210             do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1211         } else
1212 #endif
1213         {
1214             do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1215                                    is_hw);
1216         }
1217     } else {
1218 #if !defined(CONFIG_USER_ONLY)
1219         if (env->hflags & HF_GUEST_MASK) {
1220             handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1221         }
1222 #endif
1223         do_interrupt_real(env, intno, is_int, error_code, next_eip);
1224     }
1225 
1226 #if !defined(CONFIG_USER_ONLY)
1227     if (env->hflags & HF_GUEST_MASK) {
1228         CPUState *cs = CPU(cpu);
1229         uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1230                                       offsetof(struct vmcb,
1231                                                control.event_inj));
1232 
1233         x86_stl_phys(cs,
1234                  env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1235                  event_inj & ~SVM_EVTINJ_VALID);
1236     }
1237 #endif
1238 }
1239 
1240 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1241 {
1242     do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1243 }
1244 
1245 void helper_lldt(CPUX86State *env, int selector)
1246 {
1247     SegmentCache *dt;
1248     uint32_t e1, e2;
1249     int index, entry_limit;
1250     target_ulong ptr;
1251 
1252     selector &= 0xffff;
1253     if ((selector & 0xfffc) == 0) {
1254         /* XXX: NULL selector case: invalid LDT */
1255         env->ldt.base = 0;
1256         env->ldt.limit = 0;
1257     } else {
1258         if (selector & 0x4) {
1259             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1260         }
1261         dt = &env->gdt;
1262         index = selector & ~7;
1263 #ifdef TARGET_X86_64
1264         if (env->hflags & HF_LMA_MASK) {
1265             entry_limit = 15;
1266         } else
1267 #endif
1268         {
1269             entry_limit = 7;
1270         }
1271         if ((index + entry_limit) > dt->limit) {
1272             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1273         }
1274         ptr = dt->base + index;
1275         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1276         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1277         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1278             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1279         }
1280         if (!(e2 & DESC_P_MASK)) {
1281             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1282         }
1283 #ifdef TARGET_X86_64
1284         if (env->hflags & HF_LMA_MASK) {
1285             uint32_t e3;
1286 
1287             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1288             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1289             env->ldt.base |= (target_ulong)e3 << 32;
1290         } else
1291 #endif
1292         {
1293             load_seg_cache_raw_dt(&env->ldt, e1, e2);
1294         }
1295     }
1296     env->ldt.selector = selector;
1297 }
1298 
1299 void helper_ltr(CPUX86State *env, int selector)
1300 {
1301     SegmentCache *dt;
1302     uint32_t e1, e2;
1303     int index, type, entry_limit;
1304     target_ulong ptr;
1305 
1306     selector &= 0xffff;
1307     if ((selector & 0xfffc) == 0) {
1308         /* NULL selector case: invalid TR */
1309         env->tr.base = 0;
1310         env->tr.limit = 0;
1311         env->tr.flags = 0;
1312     } else {
1313         if (selector & 0x4) {
1314             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1315         }
1316         dt = &env->gdt;
1317         index = selector & ~7;
1318 #ifdef TARGET_X86_64
1319         if (env->hflags & HF_LMA_MASK) {
1320             entry_limit = 15;
1321         } else
1322 #endif
1323         {
1324             entry_limit = 7;
1325         }
1326         if ((index + entry_limit) > dt->limit) {
1327             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1328         }
1329         ptr = dt->base + index;
1330         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1331         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1332         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1333         if ((e2 & DESC_S_MASK) ||
1334             (type != 1 && type != 9)) {
1335             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1336         }
1337         if (!(e2 & DESC_P_MASK)) {
1338             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1339         }
1340 #ifdef TARGET_X86_64
1341         if (env->hflags & HF_LMA_MASK) {
1342             uint32_t e3, e4;
1343 
1344             e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1345             e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1346             if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1347                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1348             }
1349             load_seg_cache_raw_dt(&env->tr, e1, e2);
1350             env->tr.base |= (target_ulong)e3 << 32;
1351         } else
1352 #endif
1353         {
1354             load_seg_cache_raw_dt(&env->tr, e1, e2);
1355         }
1356         e2 |= DESC_TSS_BUSY_MASK;
1357         cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1358     }
1359     env->tr.selector = selector;
1360 }
1361 
1362 /* only works in protected mode and not VM86. seg_reg must be != R_CS */
1363 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1364 {
1365     uint32_t e1, e2;
1366     int cpl, dpl, rpl;
1367     SegmentCache *dt;
1368     int index;
1369     target_ulong ptr;
1370 
1371     selector &= 0xffff;
1372     cpl = env->hflags & HF_CPL_MASK;
1373     if ((selector & 0xfffc) == 0) {
1374         /* null selector case */
1375         if (seg_reg == R_SS
1376 #ifdef TARGET_X86_64
1377             && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1378 #endif
1379             ) {
1380             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1381         }
1382         cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1383     } else {
1384 
1385         if (selector & 0x4) {
1386             dt = &env->ldt;
1387         } else {
1388             dt = &env->gdt;
1389         }
1390         index = selector & ~7;
1391         if ((index + 7) > dt->limit) {
1392             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1393         }
1394         ptr = dt->base + index;
1395         e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1396         e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1397 
1398         if (!(e2 & DESC_S_MASK)) {
1399             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1400         }
1401         rpl = selector & 3;
1402         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1403         if (seg_reg == R_SS) {
1404             /* must be writable segment */
1405             if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1406                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1407             }
1408             if (rpl != cpl || dpl != cpl) {
1409                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1410             }
1411         } else {
1412             /* must be readable segment */
1413             if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1414                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1415             }
1416 
1417             if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1418                 /* if not conforming code, test rights */
1419                 if (dpl < cpl || dpl < rpl) {
1420                     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1421                 }
1422             }
1423         }
1424 
1425         if (!(e2 & DESC_P_MASK)) {
1426             if (seg_reg == R_SS) {
1427                 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1428             } else {
1429                 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1430             }
1431         }
1432 
1433         /* set the access bit if not already set */
1434         if (!(e2 & DESC_A_MASK)) {
1435             e2 |= DESC_A_MASK;
1436             cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1437         }
1438 
1439         cpu_x86_load_seg_cache(env, seg_reg, selector,
1440                        get_seg_base(e1, e2),
1441                        get_seg_limit(e1, e2),
1442                        e2);
1443 #if 0
1444         qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1445                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1446 #endif
1447     }
1448 }
1449 
1450 /* protected mode jump */
1451 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1452                            target_ulong next_eip)
1453 {
1454     int gate_cs, type;
1455     uint32_t e1, e2, cpl, dpl, rpl, limit;
1456 
1457     if ((new_cs & 0xfffc) == 0) {
1458         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1459     }
1460     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1461         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1462     }
1463     cpl = env->hflags & HF_CPL_MASK;
1464     if (e2 & DESC_S_MASK) {
1465         if (!(e2 & DESC_CS_MASK)) {
1466             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1467         }
1468         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1469         if (e2 & DESC_C_MASK) {
1470             /* conforming code segment */
1471             if (dpl > cpl) {
1472                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1473             }
1474         } else {
1475             /* non conforming code segment */
1476             rpl = new_cs & 3;
1477             if (rpl > cpl) {
1478                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1479             }
1480             if (dpl != cpl) {
1481                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1482             }
1483         }
1484         if (!(e2 & DESC_P_MASK)) {
1485             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1486         }
1487         limit = get_seg_limit(e1, e2);
1488         if (new_eip > limit &&
1489             (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1490             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1491         }
1492         cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1493                        get_seg_base(e1, e2), limit, e2);
1494         env->eip = new_eip;
1495     } else {
1496         /* jump to call or task gate */
1497         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1498         rpl = new_cs & 3;
1499         cpl = env->hflags & HF_CPL_MASK;
1500         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1501 
1502 #ifdef TARGET_X86_64
1503         if (env->efer & MSR_EFER_LMA) {
1504             if (type != 12) {
1505                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1506             }
1507         }
1508 #endif
1509         switch (type) {
1510         case 1: /* 286 TSS */
1511         case 9: /* 386 TSS */
1512         case 5: /* task gate */
1513             if (dpl < cpl || dpl < rpl) {
1514                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1515             }
1516             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1517             break;
1518         case 4: /* 286 call gate */
1519         case 12: /* 386 call gate */
1520             if ((dpl < cpl) || (dpl < rpl)) {
1521                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1522             }
1523             if (!(e2 & DESC_P_MASK)) {
1524                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1525             }
1526             gate_cs = e1 >> 16;
1527             new_eip = (e1 & 0xffff);
1528             if (type == 12) {
1529                 new_eip |= (e2 & 0xffff0000);
1530             }
1531 
1532 #ifdef TARGET_X86_64
1533             if (env->efer & MSR_EFER_LMA) {
1534                 /* load the upper 8 bytes of the 64-bit call gate */
1535                 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1536                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1537                                            GETPC());
1538                 }
1539                 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1540                 if (type != 0) {
1541                     raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1542                                            GETPC());
1543                 }
1544                 new_eip |= ((target_ulong)e1) << 32;
1545             }
1546 #endif
1547 
1548             if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1549                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1550             }
1551             dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1552             /* must be code segment */
1553             if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1554                  (DESC_S_MASK | DESC_CS_MASK))) {
1555                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1556             }
1557             if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1558                 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1559                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1560             }
1561 #ifdef TARGET_X86_64
1562             if (env->efer & MSR_EFER_LMA) {
1563                 if (!(e2 & DESC_L_MASK)) {
1564                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1565                 }
1566                 if (e2 & DESC_B_MASK) {
1567                     raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1568                 }
1569             }
1570 #endif
1571             if (!(e2 & DESC_P_MASK)) {
1572                 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1573             }
1574             limit = get_seg_limit(e1, e2);
1575             if (new_eip > limit &&
1576                 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1577                 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1578             }
1579             cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1580                                    get_seg_base(e1, e2), limit, e2);
1581             env->eip = new_eip;
1582             break;
1583         default:
1584             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1585             break;
1586         }
1587     }
1588 }
1589 
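/*
 * Rough sketch of the real-mode far call below: push the return CS:IP
 * (32-bit or 16-bit slots depending on the operand size 'shift'), then
 * load CS with the raw selector and derive the base as selector << 4,
 * real-mode style.  For example, with shift == 0 and SP == 0xfffe the
 * return CS and IP take two bytes each, leaving SP == 0xfffa.
 */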
1590 /* real mode call */
1591 void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
1592                        int shift, uint32_t next_eip)
1593 {
1594     StackAccess sa;
1595 
1596     sa.env = env;
1597     sa.ra = GETPC();
1598     sa.sp = env->regs[R_ESP];
1599     sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1600     sa.ss_base = env->segs[R_SS].base;
1601     sa.mmu_index = cpu_mmu_index_kernel(env);
1602 
1603     if (shift) {
1604         pushl(&sa, env->segs[R_CS].selector);
1605         pushl(&sa, next_eip);
1606     } else {
1607         pushw(&sa, env->segs[R_CS].selector);
1608         pushw(&sa, next_eip);
1609     }
1610 
1611     SET_ESP(sa.sp, sa.sp_mask);
1612     env->eip = new_eip;
1613     env->segs[R_CS].selector = new_cs;
1614     env->segs[R_CS].base = (new_cs << 4);
1615 }
1616 
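/*
 * Informal sketch of the protected-mode far call below.  A direct call
 * to a code segment only pushes the return CS:EIP on the current stack.
 * A call through a call gate into a more privileged non-conforming code
 * segment also switches stacks: the new SS:ESP comes from the TSS for
 * the target DPL, and the old SS:ESP, up to 31 parameters copied from
 * the old stack (16/32-bit gates only) and the old CS:EIP are pushed on
 * the new stack.  Roughly, for a 32-bit gate with param_count == 2 the
 * new stack is built in this push order (last item ends up on top):
 *
 *     old SS, old ESP, param1, param0, old CS, return EIP
 *
 * 64-bit call gates push SS:RSP and CS:RIP only; they carry no
 * parameters.
 */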
1617 /* protected mode call */
1618 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1619                             int shift, target_ulong next_eip)
1620 {
1621     int new_stack, i;
1622     uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1623     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
1624     uint32_t val, limit, old_sp_mask;
1625     target_ulong old_ssp, offset;
1626     StackAccess sa;
1627 
1628     LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1629     LOG_PCALL_STATE(env_cpu(env));
1630     if ((new_cs & 0xfffc) == 0) {
1631         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1632     }
1633     if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1634         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1635     }
1636     cpl = env->hflags & HF_CPL_MASK;
1637     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1638 
1639     sa.env = env;
1640     sa.ra = GETPC();
1641     sa.mmu_index = cpu_mmu_index_kernel(env);
1642 
1643     if (e2 & DESC_S_MASK) {
1644         if (!(e2 & DESC_CS_MASK)) {
1645             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1646         }
1647         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1648         if (e2 & DESC_C_MASK) {
1649             /* conforming code segment */
1650             if (dpl > cpl) {
1651                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1652             }
1653         } else {
1654             /* non conforming code segment */
1655             rpl = new_cs & 3;
1656             if (rpl > cpl) {
1657                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1658             }
1659             if (dpl != cpl) {
1660                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1661             }
1662         }
1663         if (!(e2 & DESC_P_MASK)) {
1664             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1665         }
1666 
1667 #ifdef TARGET_X86_64
1668         /* XXX: check 16/32 bit cases in long mode */
1669         if (shift == 2) {
1670             /* 64 bit case */
1671             sa.sp = env->regs[R_ESP];
1672             sa.sp_mask = -1;
1673             sa.ss_base = 0;
1674             pushq(&sa, env->segs[R_CS].selector);
1675             pushq(&sa, next_eip);
1676             /* from this point, not restartable */
1677             env->regs[R_ESP] = sa.sp;
1678             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1679                                    get_seg_base(e1, e2),
1680                                    get_seg_limit(e1, e2), e2);
1681             env->eip = new_eip;
1682         } else
1683 #endif
1684         {
1685             sa.sp = env->regs[R_ESP];
1686             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1687             sa.ss_base = env->segs[R_SS].base;
1688             if (shift) {
1689                 pushl(&sa, env->segs[R_CS].selector);
1690                 pushl(&sa, next_eip);
1691             } else {
1692                 pushw(&sa, env->segs[R_CS].selector);
1693                 pushw(&sa, next_eip);
1694             }
1695 
1696             limit = get_seg_limit(e1, e2);
1697             if (new_eip > limit) {
1698                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1699             }
1700             /* from this point, not restartable */
1701             SET_ESP(sa.sp, sa.sp_mask);
1702             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1703                                    get_seg_base(e1, e2), limit, e2);
1704             env->eip = new_eip;
1705         }
1706     } else {
1707         /* check gate type */
1708         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1709         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1710         rpl = new_cs & 3;
1711 
1712 #ifdef TARGET_X86_64
1713         if (env->efer & MSR_EFER_LMA) {
1714             if (type != 12) {
1715                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1716             }
1717         }
1718 #endif
1719 
1720         switch (type) {
1721         case 1: /* available 286 TSS */
1722         case 9: /* available 386 TSS */
1723         case 5: /* task gate */
1724             if (dpl < cpl || dpl < rpl) {
1725                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1726             }
1727             switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1728             return;
1729         case 4: /* 286 call gate */
1730         case 12: /* 386 call gate */
1731             break;
1732         default:
1733             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1734             break;
1735         }
1736         shift = type >> 3;
1737 
1738         if (dpl < cpl || dpl < rpl) {
1739             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1740         }
1741         /* check valid bit */
1742         if (!(e2 & DESC_P_MASK)) {
1743             raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1744         }
1745         selector = e1 >> 16;
1746         param_count = e2 & 0x1f;
1747         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1748 #ifdef TARGET_X86_64
1749         if (env->efer & MSR_EFER_LMA) {
1750             /* load the upper 8 bytes of the 64-bit call gate */
1751             if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1752                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1753                                        GETPC());
1754             }
1755             type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1756             if (type != 0) {
1757                 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1758                                        GETPC());
1759             }
1760             offset |= ((target_ulong)e1) << 32;
1761         }
1762 #endif
1763         if ((selector & 0xfffc) == 0) {
1764             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1765         }
1766 
1767         if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1768             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1769         }
1770         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1771             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1772         }
1773         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1774         if (dpl > cpl) {
1775             raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1776         }
1777 #ifdef TARGET_X86_64
1778         if (env->efer & MSR_EFER_LMA) {
1779             if (!(e2 & DESC_L_MASK)) {
1780                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1781             }
1782             if (e2 & DESC_B_MASK) {
1783                 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1784             }
1785             shift++;
1786         }
1787 #endif
1788         if (!(e2 & DESC_P_MASK)) {
1789             raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1790         }
1791 
1792         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1793             /* to inner privilege */
1794 #ifdef TARGET_X86_64
1795             if (shift == 2) {
1796                 ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1797                 new_stack = 1;
1798                 sa.sp = get_rsp_from_tss(env, dpl);
1799                 sa.sp_mask = -1;
1800                 sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
1801                 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1802                           TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
1803             } else
1804 #endif
1805             {
1806                 uint32_t sp32;
1807                 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1808                 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1809                           TARGET_FMT_lx "\n", ss, sp32, param_count,
1810                           env->regs[R_ESP]);
1811                 if ((ss & 0xfffc) == 0) {
1812                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1813                 }
1814                 if ((ss & 3) != dpl) {
1815                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1816                 }
1817                 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1818                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1819                 }
1820                 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1821                 if (ss_dpl != dpl) {
1822                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1823                 }
1824                 if (!(ss_e2 & DESC_S_MASK) ||
1825                     (ss_e2 & DESC_CS_MASK) ||
1826                     !(ss_e2 & DESC_W_MASK)) {
1827                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1828                 }
1829                 if (!(ss_e2 & DESC_P_MASK)) {
1830                     raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1831                 }
1832 
1833                 sa.sp = sp32;
1834                 sa.sp_mask = get_sp_mask(ss_e2);
1835                 sa.ss_base = get_seg_base(ss_e1, ss_e2);
1836             }
1837 
1838             /* push_size = ((param_count * 2) + 8) << shift; */
1839             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1840             old_ssp = env->segs[R_SS].base;
1841 
1842 #ifdef TARGET_X86_64
1843             if (shift == 2) {
1844                 /* XXX: verify if new stack address is canonical */
1845                 pushq(&sa, env->segs[R_SS].selector);
1846                 pushq(&sa, env->regs[R_ESP]);
1847                 /* parameters aren't supported for 64-bit call gates */
1848             } else
1849 #endif
1850             if (shift == 1) {
1851                 pushl(&sa, env->segs[R_SS].selector);
1852                 pushl(&sa, env->regs[R_ESP]);
1853                 for (i = param_count - 1; i >= 0; i--) {
1854                     val = cpu_ldl_data_ra(env,
1855                                           old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
1856                                           GETPC());
1857                     pushl(&sa, val);
1858                 }
1859             } else {
1860                 pushw(&sa, env->segs[R_SS].selector);
1861                 pushw(&sa, env->regs[R_ESP]);
1862                 for (i = param_count - 1; i >= 0; i--) {
1863                     val = cpu_lduw_data_ra(env,
1864                                            old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
1865                                            GETPC());
1866                     pushw(&sa, val);
1867                 }
1868             }
1869             new_stack = 1;
1870         } else {
1871             /* to same privilege */
1872             sa.sp = env->regs[R_ESP];
1873             sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
1874             sa.ss_base = env->segs[R_SS].base;
1875             /* push_size = (4 << shift); */
1876             new_stack = 0;
1877         }
1878 
1879 #ifdef TARGET_X86_64
1880         if (shift == 2) {
1881             pushq(&sa, env->segs[R_CS].selector);
1882             pushq(&sa, next_eip);
1883         } else
1884 #endif
1885         if (shift == 1) {
1886             pushl(&sa, env->segs[R_CS].selector);
1887             pushl(&sa, next_eip);
1888         } else {
1889             pushw(&sa, env->segs[R_CS].selector);
1890             pushw(&sa, next_eip);
1891         }
1892 
1893         /* from this point, not restartable */
1894 
1895         if (new_stack) {
1896 #ifdef TARGET_X86_64
1897             if (shift == 2) {
1898                 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1899             } else
1900 #endif
1901             {
1902                 ss = (ss & ~3) | dpl;
1903                 cpu_x86_load_seg_cache(env, R_SS, ss,
1904                                        sa.ss_base,
1905                                        get_seg_limit(ss_e1, ss_e2),
1906                                        ss_e2);
1907             }
1908         }
1909 
1910         selector = (selector & ~3) | dpl;
1911         cpu_x86_load_seg_cache(env, R_CS, selector,
1912                        get_seg_base(e1, e2),
1913                        get_seg_limit(e1, e2),
1914                        e2);
1915         SET_ESP(sa.sp, sa.sp_mask);
1916         env->eip = offset;
1917     }
1918 }
1919 
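/*
 * Sketch of the real/vm86-mode IRET below: pop EIP, CS and EFLAGS (in
 * 32-bit or 16-bit slots depending on 'shift'), reload CS real-mode
 * style (base = selector << 4) and merge the popped flags under a mask.
 * In vm86 mode IRET cannot change IOPL, so IOPL_MASK is left out of the
 * mask there.
 */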
1920 /* real and vm86 mode iret */
1921 void helper_iret_real(CPUX86State *env, int shift)
1922 {
1923     uint32_t new_cs, new_eip, new_eflags;
1924     int eflags_mask;
1925     StackAccess sa;
1926 
1927     sa.env = env;
1928     sa.ra = GETPC();
1929     sa.mmu_index = x86_mmu_index_pl(env, 0);
1930     sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */
1931     sa.sp = env->regs[R_ESP];
1932     sa.ss_base = env->segs[R_SS].base;
1933 
1934     if (shift == 1) {
1935         /* 32 bits */
1936         new_eip = popl(&sa);
1937         new_cs = popl(&sa) & 0xffff;
1938         new_eflags = popl(&sa);
1939     } else {
1940         /* 16 bits */
1941         new_eip = popw(&sa);
1942         new_cs = popw(&sa);
1943         new_eflags = popw(&sa);
1944     }
1945     SET_ESP(sa.sp, sa.sp_mask);
1946     env->segs[R_CS].selector = new_cs;
1947     env->segs[R_CS].base = (new_cs << 4);
1948     env->eip = new_eip;
1949     if (env->eflags & VM_MASK) {
1950         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1951             NT_MASK;
1952     } else {
1953         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1954             RF_MASK | NT_MASK;
1955     }
1956     if (shift == 0) {
1957         eflags_mask &= 0xffff;
1958     }
1959     cpu_load_eflags(env, new_eflags, eflags_mask);
1960     env->hflags2 &= ~HF2_NMI_MASK;
1961 }
1962 
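/*
 * When returning to an outer privilege level, data segment registers
 * whose descriptor DPL is lower than the new CPL must become unusable.
 * The helper below models that by loading a null selector and clearing
 * the P bit in the cached flags while keeping base and limit (see the
 * FS/GS note inside).
 */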
1963 static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
1964 {
1965     int dpl;
1966     uint32_t e2;
1967 
1968     /* XXX: on x86_64, we do not want to nullify FS and GS because
1969        they may still contain a valid base. I would be interested to
1970        know how a real x86_64 CPU behaves */
1971     if ((seg_reg == R_FS || seg_reg == R_GS) &&
1972         (env->segs[seg_reg].selector & 0xfffc) == 0) {
1973         return;
1974     }
1975 
1976     e2 = env->segs[seg_reg].flags;
1977     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1978     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1979         /* data or non conforming code segment */
1980         if (dpl < cpl) {
1981             cpu_x86_load_seg_cache(env, seg_reg, 0,
1982                                    env->segs[seg_reg].base,
1983                                    env->segs[seg_reg].limit,
1984                                    env->segs[seg_reg].flags & ~DESC_P_MASK);
1985         }
1986     }
1987 }
1988 
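/*
 * Common implementation of far RET and IRET in protected mode, sketched
 * informally: pop the new EIP and CS (plus EFLAGS for IRET) with the
 * operand size given by 'shift' (0 = 16-bit, 1 = 32-bit, 2 = 64-bit),
 * validate the target code segment, and when returning to an outer
 * privilege level also pop and validate the new SS:ESP and run
 * validate_seg() on the data segments so they cannot retain inner-level
 * access rights.  'addend' is the RET imm16 byte count (0 for IRET) and
 * is added to the stack pointer after the pops.
 */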
1989 /* protected mode iret */
1990 static inline void helper_ret_protected(CPUX86State *env, int shift,
1991                                         int is_iret, int addend,
1992                                         uintptr_t retaddr)
1993 {
1994     uint32_t new_cs, new_eflags, new_ss;
1995     uint32_t new_es, new_ds, new_fs, new_gs;
1996     uint32_t e1, e2, ss_e1, ss_e2;
1997     int cpl, dpl, rpl, eflags_mask, iopl;
1998     target_ulong new_eip, new_esp;
1999     StackAccess sa;
2000 
2001     cpl = env->hflags & HF_CPL_MASK;
2002 
2003     sa.env = env;
2004     sa.ra = retaddr;
2005     sa.mmu_index = x86_mmu_index_pl(env, cpl);
2006 
2007 #ifdef TARGET_X86_64
2008     if (shift == 2) {
2009         sa.sp_mask = -1;
2010     } else
2011 #endif
2012     {
2013         sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
2014     }
2015     sa.sp = env->regs[R_ESP];
2016     sa.ss_base = env->segs[R_SS].base;
2017     new_eflags = 0; /* avoid warning */
2018 #ifdef TARGET_X86_64
2019     if (shift == 2) {
2020         new_eip = popq(&sa);
2021         new_cs = popq(&sa) & 0xffff;
2022         if (is_iret) {
2023             new_eflags = popq(&sa);
2024         }
2025     } else
2026 #endif
2027     {
2028         if (shift == 1) {
2029             /* 32 bits */
2030             new_eip = popl(&sa);
2031             new_cs = popl(&sa) & 0xffff;
2032             if (is_iret) {
2033                 new_eflags = popl(&sa);
2034                 if (new_eflags & VM_MASK) {
2035                     goto return_to_vm86;
2036                 }
2037             }
2038         } else {
2039             /* 16 bits */
2040             new_eip = popw(&sa);
2041             new_cs = popw(&sa);
2042             if (is_iret) {
2043                 new_eflags = popw(&sa);
2044             }
2045         }
2046     }
2047     LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2048               new_cs, new_eip, shift, addend);
2049     LOG_PCALL_STATE(env_cpu(env));
2050     if ((new_cs & 0xfffc) == 0) {
2051         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2052     }
2053     if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2054         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2055     }
2056     if (!(e2 & DESC_S_MASK) ||
2057         !(e2 & DESC_CS_MASK)) {
2058         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2059     }
2060     rpl = new_cs & 3;
2061     if (rpl < cpl) {
2062         raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2063     }
2064     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2065     if (e2 & DESC_C_MASK) {
2066         if (dpl > rpl) {
2067             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2068         }
2069     } else {
2070         if (dpl != rpl) {
2071             raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2072         }
2073     }
2074     if (!(e2 & DESC_P_MASK)) {
2075         raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2076     }
2077 
2078     sa.sp += addend;
2079     if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2080                        ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2081         /* return to same privilege level */
2082         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2083                        get_seg_base(e1, e2),
2084                        get_seg_limit(e1, e2),
2085                        e2);
2086     } else {
2087         /* return to different privilege level */
2088 #ifdef TARGET_X86_64
2089         if (shift == 2) {
2090             new_esp = popq(&sa);
2091             new_ss = popq(&sa) & 0xffff;
2092         } else
2093 #endif
2094         {
2095             if (shift == 1) {
2096                 /* 32 bits */
2097                 new_esp = popl(&sa);
2098                 new_ss = popl(&sa) & 0xffff;
2099             } else {
2100                 /* 16 bits */
2101                 new_esp = popw(&sa);
2102                 new_ss = popw(&sa);
2103             }
2104         }
2105         LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2106                   new_ss, new_esp);
2107         if ((new_ss & 0xfffc) == 0) {
2108 #ifdef TARGET_X86_64
2109             /* NULL ss is allowed in long mode if cpl != 3 */
2110             /* XXX: test CS64? */
2111             if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2112                 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2113                                        0, 0xffffffff,
2114                                        DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2115                                        DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2116                                        DESC_W_MASK | DESC_A_MASK);
2117                 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2118             } else
2119 #endif
2120             {
2121                 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2122             }
2123         } else {
2124             if ((new_ss & 3) != rpl) {
2125                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2126             }
2127             if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2128                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2129             }
2130             if (!(ss_e2 & DESC_S_MASK) ||
2131                 (ss_e2 & DESC_CS_MASK) ||
2132                 !(ss_e2 & DESC_W_MASK)) {
2133                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2134             }
2135             dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2136             if (dpl != rpl) {
2137                 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2138             }
2139             if (!(ss_e2 & DESC_P_MASK)) {
2140                 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2141             }
2142             cpu_x86_load_seg_cache(env, R_SS, new_ss,
2143                                    get_seg_base(ss_e1, ss_e2),
2144                                    get_seg_limit(ss_e1, ss_e2),
2145                                    ss_e2);
2146         }
2147 
2148         cpu_x86_load_seg_cache(env, R_CS, new_cs,
2149                        get_seg_base(e1, e2),
2150                        get_seg_limit(e1, e2),
2151                        e2);
2152         sa.sp = new_esp;
2153 #ifdef TARGET_X86_64
2154         if (env->hflags & HF_CS64_MASK) {
2155             sa.sp_mask = -1;
2156         } else
2157 #endif
2158         {
2159             sa.sp_mask = get_sp_mask(ss_e2);
2160         }
2161 
2162         /* validate data segments */
2163         validate_seg(env, R_ES, rpl);
2164         validate_seg(env, R_DS, rpl);
2165         validate_seg(env, R_FS, rpl);
2166         validate_seg(env, R_GS, rpl);
2167 
2168         sa.sp += addend;
2169     }
2170     SET_ESP(sa.sp, sa.sp_mask);
2171     env->eip = new_eip;
2172     if (is_iret) {
2173         /* NOTE: 'cpl' is the _old_ CPL */
2174         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2175         if (cpl == 0) {
2176             eflags_mask |= IOPL_MASK;
2177         }
2178         iopl = (env->eflags >> IOPL_SHIFT) & 3;
2179         if (cpl <= iopl) {
2180             eflags_mask |= IF_MASK;
2181         }
2182         if (shift == 0) {
2183             eflags_mask &= 0xffff;
2184         }
2185         cpu_load_eflags(env, new_eflags, eflags_mask);
2186     }
2187     return;
2188 
2189  return_to_vm86:
2190     new_esp = popl(&sa);
2191     new_ss = popl(&sa);
2192     new_es = popl(&sa);
2193     new_ds = popl(&sa);
2194     new_fs = popl(&sa);
2195     new_gs = popl(&sa);
2196 
2197     /* modify processor state */
2198     cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2199                     IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2200                     VIP_MASK);
2201     load_seg_vm(env, R_CS, new_cs & 0xffff);
2202     load_seg_vm(env, R_SS, new_ss & 0xffff);
2203     load_seg_vm(env, R_ES, new_es & 0xffff);
2204     load_seg_vm(env, R_DS, new_ds & 0xffff);
2205     load_seg_vm(env, R_FS, new_fs & 0xffff);
2206     load_seg_vm(env, R_GS, new_gs & 0xffff);
2207 
2208     env->eip = new_eip & 0xffff;
2209     env->regs[R_ESP] = new_esp;
2210 }
2211 
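/*
 * Protected-mode IRET entry point: when EFLAGS.NT is set (and we are not
 * in long mode) the return is a task switch back through the backlink
 * selector stored at offset 0 of the current TSS; otherwise it is an
 * ordinary same/outer-level return handled by helper_ret_protected().
 */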
2212 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2213 {
2214     int tss_selector, type;
2215     uint32_t e1, e2;
2216 
2217     /* specific case for TSS */
2218     if (env->eflags & NT_MASK) {
2219 #ifdef TARGET_X86_64
2220         if (env->hflags & HF_LMA_MASK) {
2221             raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2222         }
2223 #endif
2224         tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2225         if (tss_selector & 4) {
2226             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2227         }
2228         if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2229             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2230         }
2231         type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2232         /* NOTE: we check both segment and busy TSS */
2233         if (type != 3) {
2234             raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2235         }
2236         switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2237     } else {
2238         helper_ret_protected(env, shift, 1, 0, GETPC());
2239     }
2240     env->hflags2 &= ~HF2_NMI_MASK;
2241 }
2242 
2243 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2244 {
2245     helper_ret_protected(env, shift, 0, addend, GETPC());
2246 }
2247 
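/*
 * SYSENTER loads flat ring-0 CS/SS descriptors derived from
 * IA32_SYSENTER_CS (CS = MSR value, SS = MSR value + 8) and continues at
 * IA32_SYSENTER_EIP with IA32_SYSENTER_ESP as the stack.  Nothing is
 * pushed for the return path; the matching SYSEXIT below takes the user
 * stack pointer from ECX and the user EIP from EDX.
 */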
2248 void helper_sysenter(CPUX86State *env)
2249 {
2250     if (env->sysenter_cs == 0) {
2251         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2252     }
2253     env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2254 
2255 #ifdef TARGET_X86_64
2256     if (env->hflags & HF_LMA_MASK) {
2257         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2258                                0, 0xffffffff,
2259                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2260                                DESC_S_MASK |
2261                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2262                                DESC_L_MASK);
2263     } else
2264 #endif
2265     {
2266         cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2267                                0, 0xffffffff,
2268                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2269                                DESC_S_MASK |
2270                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2271     }
2272     cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2273                            0, 0xffffffff,
2274                            DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2275                            DESC_S_MASK |
2276                            DESC_W_MASK | DESC_A_MASK);
2277     env->regs[R_ESP] = env->sysenter_esp;
2278     env->eip = env->sysenter_eip;
2279 }
2280 
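/*
 * SYSEXIT is the fast return path: it rebuilds flat ring-3 CS/SS at
 * fixed offsets from IA32_SYSENTER_CS (+16/+24 for a 32-bit return,
 * +32/+40 for a 64-bit return, dflag == 2) and resumes at EDX with the
 * stack taken from ECX.  It raises #GP when executed outside CPL 0 or
 * with IA32_SYSENTER_CS == 0.
 */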
2281 void helper_sysexit(CPUX86State *env, int dflag)
2282 {
2283     int cpl;
2284 
2285     cpl = env->hflags & HF_CPL_MASK;
2286     if (env->sysenter_cs == 0 || cpl != 0) {
2287         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2288     }
2289 #ifdef TARGET_X86_64
2290     if (dflag == 2) {
2291         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2292                                3, 0, 0xffffffff,
2293                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2294                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2295                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2296                                DESC_L_MASK);
2297         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2298                                3, 0, 0xffffffff,
2299                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2300                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2301                                DESC_W_MASK | DESC_A_MASK);
2302     } else
2303 #endif
2304     {
2305         cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2306                                3, 0, 0xffffffff,
2307                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2308                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2309                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2310         cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2311                                3, 0, 0xffffffff,
2312                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2313                                DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2314                                DESC_W_MASK | DESC_A_MASK);
2315     }
2316     env->regs[R_ESP] = env->regs[R_ECX];
2317     env->eip = env->regs[R_EDX];
2318 }
2319 
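/*
 * LSL: if the selector passes the descriptor-type and privilege checks
 * below, return the segment limit (already expanded for page
 * granularity by get_seg_limit()) and set ZF; otherwise clear ZF and
 * return 0.  The caller is expected to enter with CC_OP_EFLAGS, hence
 * the assert.
 */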
2320 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2321 {
2322     unsigned int limit;
2323     uint32_t e1, e2, selector;
2324     int rpl, dpl, cpl, type;
2325 
2326     selector = selector1 & 0xffff;
2327     assert(CC_OP == CC_OP_EFLAGS);
2328     if ((selector & 0xfffc) == 0) {
2329         goto fail;
2330     }
2331     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2332         goto fail;
2333     }
2334     rpl = selector & 3;
2335     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2336     cpl = env->hflags & HF_CPL_MASK;
2337     if (e2 & DESC_S_MASK) {
2338         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2339             /* conforming */
2340         } else {
2341             if (dpl < cpl || dpl < rpl) {
2342                 goto fail;
2343             }
2344         }
2345     } else {
2346         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2347         switch (type) {
2348         case 1:
2349         case 2:
2350         case 3:
2351         case 9:
2352         case 11:
2353             break;
2354         default:
2355             goto fail;
2356         }
2357         if (dpl < cpl || dpl < rpl) {
2358         fail:
2359             CC_SRC &= ~CC_Z;
2360             return 0;
2361         }
2362     }
2363     limit = get_seg_limit(e1, e2);
2364     CC_SRC |= CC_Z;
2365     return limit;
2366 }
2367 
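/*
 * LAR follows the same pattern as LSL above but returns the descriptor
 * access-rights bytes (e2 masked to 0x00f0ff00) instead of the limit,
 * and additionally accepts call gate and task gate descriptor types.
 */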
2368 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2369 {
2370     uint32_t e1, e2, selector;
2371     int rpl, dpl, cpl, type;
2372 
2373     selector = selector1 & 0xffff;
2374     assert(CC_OP == CC_OP_EFLAGS);
2375     if ((selector & 0xfffc) == 0) {
2376         goto fail;
2377     }
2378     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2379         goto fail;
2380     }
2381     rpl = selector & 3;
2382     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2383     cpl = env->hflags & HF_CPL_MASK;
2384     if (e2 & DESC_S_MASK) {
2385         if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2386             /* conforming */
2387         } else {
2388             if (dpl < cpl || dpl < rpl) {
2389                 goto fail;
2390             }
2391         }
2392     } else {
2393         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2394         switch (type) {
2395         case 1:
2396         case 2:
2397         case 3:
2398         case 4:
2399         case 5:
2400         case 9:
2401         case 11:
2402         case 12:
2403             break;
2404         default:
2405             goto fail;
2406         }
2407         if (dpl < cpl || dpl < rpl) {
2408         fail:
2409             CC_SRC &= ~CC_Z;
2410             return 0;
2411         }
2412     }
2413     CC_SRC |= CC_Z;
2414     return e2 & 0x00f0ff00;
2415 }
2416 
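/*
 * VERR sets ZF when the selector names a code or data segment readable
 * at the current privilege level (conforming code segments skip the
 * DPL/RPL check) and clears ZF otherwise; it never faults on a bad
 * selector, the result is only reported through ZF.
 */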
2417 void helper_verr(CPUX86State *env, target_ulong selector1)
2418 {
2419     uint32_t e1, e2, eflags, selector;
2420     int rpl, dpl, cpl;
2421 
2422     selector = selector1 & 0xffff;
2423     eflags = cpu_cc_compute_all(env) | CC_Z;
2424     if ((selector & 0xfffc) == 0) {
2425         goto fail;
2426     }
2427     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2428         goto fail;
2429     }
2430     if (!(e2 & DESC_S_MASK)) {
2431         goto fail;
2432     }
2433     rpl = selector & 3;
2434     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2435     cpl = env->hflags & HF_CPL_MASK;
2436     if (e2 & DESC_CS_MASK) {
2437         if (!(e2 & DESC_R_MASK)) {
2438             goto fail;
2439         }
2440         if (!(e2 & DESC_C_MASK)) {
2441             if (dpl < cpl || dpl < rpl) {
2442                 goto fail;
2443             }
2444         }
2445     } else {
2446         if (dpl < cpl || dpl < rpl) {
2447         fail:
2448             eflags &= ~CC_Z;
2449         }
2450     }
2451     CC_SRC = eflags;
2452     CC_OP = CC_OP_EFLAGS;
2453 }
2454 
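/*
 * VERW is the write-permission counterpart of VERR: ZF is set only for
 * a writable data segment (code segments always fail the check) whose
 * DPL is not below CPL or RPL.
 */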
2455 void helper_verw(CPUX86State *env, target_ulong selector1)
2456 {
2457     uint32_t e1, e2, eflags, selector;
2458     int rpl, dpl, cpl;
2459 
2460     selector = selector1 & 0xffff;
2461     eflags = cpu_cc_compute_all(env) | CC_Z;
2462     if ((selector & 0xfffc) == 0) {
2463         goto fail;
2464     }
2465     if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2466         goto fail;
2467     }
2468     if (!(e2 & DESC_S_MASK)) {
2469         goto fail;
2470     }
2471     rpl = selector & 3;
2472     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2473     cpl = env->hflags & HF_CPL_MASK;
2474     if (e2 & DESC_CS_MASK) {
2475         goto fail;
2476     } else {
2477         if (dpl < cpl || dpl < rpl) {
2478             goto fail;
2479         }
2480         if (!(e2 & DESC_W_MASK)) {
2481         fail:
2482             eflags &= ~CC_Z;
2483         }
2484     }
2485     CC_SRC = eflags;
2486     CC_OP = CC_OP_EFLAGS;
2487 }
2488