/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"

/* return non-zero on error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
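
/*
 * Illustrative sketch (not built): in vm86 mode the linear address is
 * formed exactly as on a real 8086, base = selector << 4.
 */
#if 0
{
    uint32_t selector = 0x1234;                 /* assumed value */
    uint32_t linear = (selector << 4) + 0x0010; /* 0x1234:0x0010 -> 0x12350 */
}
#endif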

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
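
/*
 * Stack-slot arithmetic sketch for the function above (illustrative
 * only): in a 32-bit TSS (shift = 1), the SS:ESP pair for privilege
 * level dpl sits at byte offset dpl * 8 + 4.
 */
#if 0
{
    int dpl = 0, shift = 1;
    int index = (dpl * 4 + 2) << shift; /* 4: ESP0 at TSS+0x04 */
    /* SS0 then follows at index + 4 == TSS+0x08 */
}
#endif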

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       a possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
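
/*
 * Usage sketch (illustrative, not built): the returned mask is applied
 * to every stack-pointer update, so a 16-bit stack segment only ever
 * modifies SP while a 32-bit one modifies all of ESP.
 */
#if 0
{
    unsigned int mask32 = get_sp_mask(DESC_B_MASK); /* 0xffffffff */
    unsigned int mask16 = get_sp_mask(0);           /* 0xffff */
}
#endif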

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* On 64-bit machines this addition can overflow, so this segment addition
 * macro can be used to trim the value to 32 bits whenever needed. */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
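
/*
 * Overflow sketch for SEG_ADDL (illustrative values): with ssp near the
 * top of the 32-bit address space, ssp + sp carries into bit 32 on a
 * 64-bit host; the uint32_t cast drops that carry, matching 32-bit
 * segmented wrap-around.
 */
#if 0
{
    uint64_t ssp = 0xfffff000;
    uint32_t sp = 0x2000, sp_mask = 0xffffffff;
    uint32_t addr = SEG_ADDL(ssp, sp, sp_mask); /* 0x1000, not 0x100001000 */
}
#endif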

/* XXX: add an is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                                \
        sp -= 2;                                                     \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                                      \
    {                                                                            \
        sp -= 4;                                                                 \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                           \
    {                                                                \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                     \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                                      \
    {                                                                           \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                                \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
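
/*
 * Usage sketch of the macros above (illustrative, not built; assumes a
 * helper where env is in scope): pushes and pops update only the local
 * sp copy, so a fault mid-sequence leaves env->regs[R_ESP] untouched
 * until SET_ESP commits the new value.
 */
#if 0
{
    target_ulong ssp = env->segs[R_SS].base;
    uint32_t sp = env->regs[R_ESP];
    uint32_t sp_mask = get_sp_mask(env->segs[R_SS].flags);
    uint32_t val;

    PUSHL(ssp, sp, sp_mask, 0x12345678);
    POPL(ssp, sp, sp_mask, val);  /* val == 0x12345678 */
    SET_ESP(sp, sp_mask);         /* commit the stack pointer */
}
#endif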

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                  \
    {                                          \
        sp -= 8;                               \
        cpu_stq_kernel_ra(env, sp, (val), ra); \
    }

#define POPQ_RA(sp, val, ra)                  \
    {                                         \
        val = cpu_ldq_kernel_ra(env, sp, ra); \
        sp += 8;                              \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
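
/*
 * Offset sketch for the 64-bit TSS (illustrative only): RSP0..RSP2 live
 * at 8 * level + 4 for levels 0..2, and the IST entries are reached as
 * levels 4..10, which is why callers pass ist + 3.
 */
#if 0
{
    int rsp0_off = 8 * 0 + 4;       /* 0x04 */
    int ist1_off = 8 * (1 + 3) + 4; /* 0x24: IST1, first IST slot */
}
#endif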

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
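
/*
 * Resulting stack frame (sketch): after the pushes above, RSP points at
 * the saved RIP (or at the error code when one was pushed), with the
 * slots stacked in the order IRETQ expects:
 *
 *   rsp+40: old SS        (offsets are 8 less without an error code)
 *   rsp+32: old RSP
 *   rsp+24: RFLAGS
 *   rsp+16: old CS
 *   rsp+8:  old RIP
 *   rsp+0:  error code    (only when has_error_code != 0)
 */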

void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
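
/*
 * Selector arithmetic sketch (assumed STAR value, illustrative only):
 * with STAR[63:48] = 0x0023, a 64-bit SYSRET loads CS = (0x23 + 16) | 3
 * = 0x33 and SS = (0x23 + 8) | 3 = 0x2b; the 32-bit path uses 0x23 | 3.
 */
#if 0
{
    uint16_t star_hi = 0x0023;          /* assumed STAR[63:48] */
    uint16_t cs64 = (star_hi + 16) | 3; /* 0x33 */
    uint16_t ss = (star_hi + 8) | 3;    /* 0x2b */
}
#endif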
#endif /* TARGET_X86_64 */

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
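
/*
 * IVT lookup sketch (illustrative only): each real-mode vector is four
 * bytes, offset word first, then segment word, so with IDT.base = 0
 * vector 0x10 is read from linear addresses 0x40 and 0x42.
 */
#if 0
{
    int intno = 0x10;
    target_ulong p = env->idt.base + intno * 4;
    uint32_t off = cpu_lduw_kernel(env, p);     /* new IP */
    uint32_t sel = cpu_lduw_kernel(env, p + 2); /* new CS */
}
#endif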

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

/* only works in protected mode and not in vm86 mode. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl; /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0; /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;
                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify that the new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
1780
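/*
 * For illustration only: after an inner-privilege far call through a
 * 32-bit call gate (shift == 1), the new stack holds, from higher to
 * lower addresses:
 *
 *     old SS, old ESP, param[param_count - 1] ... param[0], old CS, EIP
 *
 * A 64-bit call gate (shift == 2) pushes only old SS:RSP and CS:RIP,
 * since parameter copying is not supported there.
 */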
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

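/*
 * On a return to an outer privilege level, data segment registers whose
 * DPL is below the new CPL may not remain usable (the "check segment
 * registers" step of RET/IRET in the SDM); conforming code segments are
 * exempt.  The helper below models this by loading a null selector and
 * clearing the present bit in the cached descriptor.
 */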
static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

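/*
 * For reference, the frame consumed by helper_ret_protected() above, as
 * pushed at interrupt/call time (last-pushed value at the lowest
 * address):
 *
 *     iret:  [SS, ESP,] EFLAGS, CS, EIP   (SS:ESP only on a stack switch)
 *     lret:  [SS, ESP,] CS, EIP
 *
 * In 64-bit mode (shift == 2) every slot is 8 bytes wide and SS:RSP is
 * always present for iret.
 */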
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: the 0x17 mask checks the S bit and the busy-TSS type in
           a single compare, accepting both 16- and 32-bit busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

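/* far return (lret/retf): no EFLAGS pop; addend implements RET imm16 */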
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

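/*
 * SYSENTER: fast call into ring 0.  No descriptor table lookup is done:
 * CS is loaded from the IA32_SYSENTER_CS MSR, SS is implicitly the next
 * descriptor (selector + 8), and EIP/ESP come from IA32_SYSENTER_EIP
 * and IA32_SYSENTER_ESP; flat 4 GiB segments are assumed.
 */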
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

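/*
 * SYSEXIT: return to ring 3.  CS/SS are again derived from
 * IA32_SYSENTER_CS: selector + 16/+24 for a 32-bit return, +32/+40 when
 * dflag == 2 (64-bit), with RPL forced to 3.  The kernel is expected to
 * have loaded the user EIP into EDX and the user ESP into ECX.
 */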
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

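/*
 * LSL: fetch the expanded limit of the selected segment and report
 * success in ZF.  Among system descriptors only TSS (types 1, 3, 9, 11)
 * and LDT (type 2) have a meaningful limit.  A hypothetical guest-side
 * use, for illustration:
 *
 *     lsl  eax, bx        ; ZF=1, EAX=limit if BX passes the checks
 *     jnz  not_accessible
 */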
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

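/*
 * LAR: like LSL but returns the descriptor's access-rights bytes
 * (masked with 0x00f0ff00: type, S, DPL, P plus the AVL/L/D-B/G nibble).
 * More system types are valid than for LSL, notably the gate types
 * 4, 5 and 12, which have rights but no limit.
 */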
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

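/*
 * VERR and VERW set ZF if the segment would be readable (respectively
 * writable) at the current CPL and the selector's RPL, without raising
 * a fault.  Only user (S=1) descriptors can succeed; conforming code
 * segments skip the privilege check for VERR.
 */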
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
