/////////////////////////////////////////////////////////////////////////
// $Id: segment_ctrl_pro.cc 14086 2021-01-30 08:35:35Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2015 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

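// load_seg_reg() is the common back end for instructions that load a
// selector into a segment register (MOV Sreg, POP Sreg and similar).
// In protected mode it performs the full descriptor checks -- stricter
// for SS than for DS/ES/FS/GS -- raising #GP/#SS/#NP on failure; in
// real and v8086 mode it falls through to the simple base = selector*16
// load at the bottom of the function.
//
// Illustrative call (a sketch only; 0x23 is a hypothetical ring-3 data
// selector, not a constant defined by Bochs):
//   load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS], 0x23);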
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::load_seg_reg(bx_segment_reg_t *seg, Bit16u new_value)
{
  if (protected_mode())
  {
    if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS])
    {
      bx_selector_t ss_selector;
      bx_descriptor_t descriptor;
      Bit32u dword1, dword2;

      parse_selector(new_value, &ss_selector);

      if ((new_value & 0xfffc) == 0) { /* null selector */
#if BX_SUPPORT_X86_64
        // allow SS = 0 in 64 bit mode only with cpl != 3 and rpl=cpl
        if (long64_mode() && CPL != 3 && ss_selector.rpl == CPL) {
          load_null_selector(seg, new_value);
          return;
        }
#endif
        BX_ERROR(("load_seg_reg(SS): loading null selector"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc);
      }

      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

      /* selector's RPL must = CPL, else #GP(selector) */
      if (ss_selector.rpl != CPL) {
        BX_ERROR(("load_seg_reg(SS): rpl != CPL"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc);
      }

      parse_descriptor(dword1, dword2, &descriptor);

      if (descriptor.valid==0) {
        BX_ERROR(("load_seg_reg(SS): valid bit cleared"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc);
      }

      /* AR byte must indicate a writable data segment else #GP(selector) */
      if (descriptor.segment==0 || IS_CODE_SEGMENT(descriptor.type) ||
          IS_DATA_SEGMENT_WRITEABLE(descriptor.type) == 0)
      {
        BX_ERROR(("load_seg_reg(SS): not writable data segment"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc);
      }

      /* DPL in the AR byte must equal CPL else #GP(selector) */
      if (descriptor.dpl != CPL) {
        BX_ERROR(("load_seg_reg(SS): dpl != CPL"));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc);
      }

      /* segment must be marked PRESENT else #SS(selector) */
      if (! IS_PRESENT(descriptor)) {
        BX_ERROR(("load_seg_reg(SS): not present"));
        exception(BX_SS_EXCEPTION, new_value & 0xfffc);
      }

      touch_segment(&ss_selector, &descriptor);

      /* load SS with selector, load SS cache with descriptor */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector = ss_selector;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache = descriptor;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache;

      invalidate_stack_cache();

      return;
    }
    else if ((seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS]) ||
             (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES]) ||
             (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS]) ||
             (seg==&BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS]))
    {
      bx_descriptor_t descriptor;
      bx_selector_t selector;
      Bit32u dword1, dword2;

      if ((new_value & 0xfffc) == 0) { /* null selector */
        load_null_selector(seg, new_value);
        return;
      }

      parse_selector(new_value, &selector);
      fetch_raw_descriptor(&selector, &dword1, &dword2, BX_GP_EXCEPTION);
      parse_descriptor(dword1, dword2, &descriptor);

      if (descriptor.valid==0) {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): invalid segment", strseg(seg), new_value));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc);
      }

      /* AR byte must indicate data or readable code segment else #GP(selector) */
      if (descriptor.segment==0 || (IS_CODE_SEGMENT(descriptor.type) &&
          IS_CODE_SEGMENT_READABLE(descriptor.type) == 0))
      {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): not data or readable code", strseg(seg), new_value));
        exception(BX_GP_EXCEPTION, new_value & 0xfffc);
      }

      /* If data or non-conforming code, then both the RPL and the CPL
       * must be less than or equal to DPL in AR byte else #GP(selector) */
      if (IS_DATA_SEGMENT(descriptor.type) ||
          IS_CODE_SEGMENT_NON_CONFORMING(descriptor.type))
      {
        if ((selector.rpl > descriptor.dpl) || (CPL > descriptor.dpl)) {
          BX_ERROR(("load_seg_reg(%s, 0x%04x): RPL & CPL must be <= DPL", strseg(seg), new_value));
          exception(BX_GP_EXCEPTION, new_value & 0xfffc);
        }
      }

      /* segment must be marked PRESENT else #NP(selector) */
      if (! IS_PRESENT(descriptor)) {
        BX_ERROR(("load_seg_reg(%s, 0x%04x): segment not present", strseg(seg), new_value));
        exception(BX_NP_EXCEPTION, new_value & 0xfffc);
      }

      touch_segment(&selector, &descriptor);

      /* load segment register with selector */
      /* load segment register-cache with descriptor */
      seg->selector = selector;
      seg->cache = descriptor;
      seg->cache.valid = SegValidCache;

      return;
    }
    else {
      BX_PANIC(("load_seg_reg(): invalid segment register passed!"));
      return;
    }
  }

  /* real or v8086 mode */

  /* www.x86.org:
    According to Intel, each time any segment register is loaded in real
    mode, the base address is calculated as 16 times the segment value,
    while the access rights and size limit attributes are given fixed,
    "real-mode compatible" values. This is not true. In fact, only the CS
    descriptor caches for the 286, 386, and 486 get loaded with fixed
    values each time the segment register is loaded. Loading CS, or any
    other segment register in real mode, on later Intel processors doesn't
    change the access rights or the segment size limit attributes stored
    in the descriptor cache registers. For these segments, the access
    rights and segment size limit attributes from any previous setting are
    honored. */

  seg->selector.value = new_value;
  seg->selector.rpl = real_mode() ? 0 : 3;
  seg->cache.valid = SegValidCache;
  seg->cache.u.segment.base = new_value << 4;
  seg->cache.segment = 1; /* regular segment */
  seg->cache.p = 1; /* present */

  /* Do not modify segment limit and AR bytes when in real mode */
  /* Support for big real mode */
  if (!real_mode()) {
    seg->cache.type = BX_DATA_READ_WRITE_ACCESSED;
    seg->cache.dpl = 3; /* we are in v8086 mode */
    seg->cache.u.segment.limit_scaled = 0xffff;
    seg->cache.u.segment.g = 0; /* byte granular */
    seg->cache.u.segment.d_b = 0; /* default 16bit size */
#if BX_SUPPORT_X86_64
    seg->cache.u.segment.l = 0; /* default 16bit size */
#endif
    seg->cache.u.segment.avl = 0;
  }

  if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS]) {
    invalidate_prefetch_q();
    updateFetchModeMask(/* CS reloaded */);
#if BX_CPU_LEVEL >= 4
    handleAlignmentCheck(/* CPL change */);
#endif
  }

  if (seg == &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS])
    invalidate_stack_cache();
}

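// Loading a null selector keeps the selector value (and thus its RPL)
// architecturally visible but marks the descriptor cache invalid, so any
// later data reference through this segment register faults.  The cache
// fields are cleared to deterministic values rather than left stale.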
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::load_null_selector(bx_segment_reg_t *seg, unsigned value)
{
  BX_ASSERT((value & 0xfffc) == 0);

  seg->selector.index = 0;
  seg->selector.ti = 0;
  seg->selector.rpl = BX_SELECTOR_RPL(value);
  seg->selector.value = value;

  seg->cache.valid = 0; /* invalidate null selector */
  seg->cache.p = 0;
  seg->cache.dpl = 0;
  seg->cache.segment = 1; /* data/code segment */
  seg->cache.type = 0;

  seg->cache.u.segment.base = 0;
  seg->cache.u.segment.limit_scaled = 0;
  seg->cache.u.segment.g = 0;
  seg->cache.u.segment.d_b = 0;
  seg->cache.u.segment.avl = 0;
#if BX_SUPPORT_X86_64
  seg->cache.u.segment.l = 0;
#endif

  invalidate_stack_cache();
}

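// Called when CPL changes (e.g. on a return to an outer privilege
// level): a data or non-conforming code segment whose cached DPL is
// below the new CPL can no longer be used and is silently nulled, per
// the pseudocode below.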
BX_CPP_INLINE void BX_CPU_C::validate_seg_reg(unsigned seg)
{
  /*
     FOR (seg = ES, DS, FS, GS)
     DO
       IF ((seg.attr.dpl < CPL) && ((seg.attr.type = 'data')
                || (seg.attr.type = 'non-conforming-code')))
       {
          seg = NULL // can't use lower dpl data segment at higher cpl
       }
     END
  */

  bx_segment_reg_t *segment = &BX_CPU_THIS_PTR sregs[seg];

  if (segment->cache.dpl < CPL)
  {
    // invalidate if data or non-conforming code segment
    if (segment->cache.valid==0 || segment->cache.segment==0 ||
        IS_DATA_SEGMENT(segment->cache.type) ||
        IS_CODE_SEGMENT_NON_CONFORMING(segment->cache.type))
    {
      segment->selector.value = 0;
      segment->cache.valid = 0;
    }
  }
}

void BX_CPU_C::validate_seg_regs(void)
{
  validate_seg_reg(BX_SEG_REG_ES);
  validate_seg_reg(BX_SEG_REG_DS);
  validate_seg_reg(BX_SEG_REG_FS);
  validate_seg_reg(BX_SEG_REG_GS);
}

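// Decompose a raw 16-bit selector: bits 15..3 hold the descriptor table
// index, bit 2 is TI (0 = GDT, 1 = LDT), bits 1..0 are the RPL.
// Example: raw_selector = 0x002b decodes to index 5, ti 0, rpl 3.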
void parse_selector(Bit16u raw_selector, bx_selector_t *selector)
{
  selector->value = raw_selector;
  selector->index = raw_selector >> 3;
  selector->ti = (raw_selector >> 2) & 0x01;
  selector->rpl = raw_selector & 0x03;
}

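// get_ar_byte()/set_ar_byte() convert between the cached fields and the
// access-rights (AR) byte as it appears in descriptor byte 5:
//   bit 7 = P, bits 6..5 = DPL, bit 4 = S (1 = code/data), bits 3..0 = type.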
Bit8u get_ar_byte(const bx_descriptor_t *d)
{
  return (d->type) |
         (d->segment << 4) |
         (d->dpl << 5) |
         (d->p << 7);
}

void set_ar_byte(bx_descriptor_t *d, Bit8u ar_byte)
{
  d->p = (ar_byte >> 7) & 0x01;
  d->dpl = (ar_byte >> 5) & 0x03;
  d->segment = (ar_byte >> 4) & 0x01;
  d->type = (ar_byte & 0x0f);
}

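// get_descriptor_l()/get_descriptor_h() rebuild the low and high dwords
// of a legacy 8-byte descriptor from the descriptor cache.  The cache
// keeps the limit pre-scaled, so when G is set it is shifted back down
// by 12 bits before being split across the two dwords.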
Bit32u BX_CPP_AttrRegparmN(1)
BX_CPU_C::get_descriptor_l(const bx_descriptor_t *d)
{
  Bit32u limit = d->u.segment.limit_scaled;
  if (d->u.segment.g)
    limit >>= 12;

  Bit32u val = ((d->u.segment.base & 0xffff) << 16) | (limit & 0xffff);

  if (d->segment || !d->valid) {
    return(val);
  }
  else {
    switch (d->type) {
      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        return(val);

      default:
        BX_ERROR(("#get_descriptor_l(): type %d not finished", d->type));
        return(0);
    }
  }
}

Bit32u BX_CPP_AttrRegparmN(1)
BX_CPU_C::get_descriptor_h(const bx_descriptor_t *d)
{
  Bit32u val;

  Bit32u limit = d->u.segment.limit_scaled;
  if (d->u.segment.g)
    limit >>= 12;

  if (d->segment || !d->valid) {
    val = (d->u.segment.base & 0xff000000) |
          ((d->u.segment.base >> 16) & 0x000000ff) |
          (d->type << 8) |
          (d->segment << 12) |
          (d->dpl << 13) |
          (d->p << 15) | (limit & 0xf0000) |
          (d->u.segment.avl << 20) |
#if BX_SUPPORT_X86_64
          (d->u.segment.l << 21) |
#endif
          (d->u.segment.d_b << 22) |
          (d->u.segment.g << 23);
    return(val);
  }
  else {
    switch (d->type) {
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        val = ((d->u.segment.base >> 16) & 0xff) |
              (d->type << 8) |
              (d->dpl << 13) |
              (d->p << 15) | (limit & 0xf0000) |
              (d->u.segment.avl << 20) |
              (d->u.segment.d_b << 22) |
              (d->u.segment.g << 23) |
              (d->u.segment.base & 0xff000000);
        return(val);

      default:
        BX_ERROR(("#get_descriptor_h(): type %d not finished", d->type));
        return(0);
    }
  }
}

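// Rebuild a complete segment register (selector plus descriptor cache)
// from unpacked values, e.g. when state arrives from outside the normal
// descriptor-fetch path (restoring a saved CPU context).  Note that,
// unlike set_ar_byte(), ar_data is 16 bits wide here: bits 15..12 carry
// the G/D.B/L/AVL flags.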
bool BX_CPU_C::set_segment_ar_data(bx_segment_reg_t *seg, bool valid,
        Bit16u raw_selector, bx_address base, Bit32u limit_scaled, Bit16u ar_data)
{
  parse_selector(raw_selector, &seg->selector);

  bx_descriptor_t *d = &seg->cache;

  d->p = (ar_data >> 7) & 0x1;
  d->dpl = (ar_data >> 5) & 0x3;
  d->segment = (ar_data >> 4) & 0x1;
  d->type = (ar_data & 0x0f);

  d->valid = valid;

  if (d->segment || !valid) { /* data/code segment descriptors */
    d->u.segment.g = (ar_data >> 15) & 0x1;
    d->u.segment.d_b = (ar_data >> 14) & 0x1;
#if BX_SUPPORT_X86_64
    d->u.segment.l = (ar_data >> 13) & 0x1;
#endif
    d->u.segment.avl = (ar_data >> 12) & 0x1;

    d->u.segment.base = base;
    d->u.segment.limit_scaled = limit_scaled;
  }
  else {
    switch(d->type) {
      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        d->u.segment.avl = (ar_data >> 12) & 0x1;
        d->u.segment.d_b = (ar_data >> 14) & 0x1;
        d->u.segment.g = (ar_data >> 15) & 0x1;
        d->u.segment.base = base;
        d->u.segment.limit_scaled = limit_scaled;
        break;

      default:
        BX_ERROR(("set_segment_ar_data(): case %u unsupported, valid=%d", (unsigned) d->type, d->valid));
    }
  }

  return d->valid;
}

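// Unpack a raw 8-byte descriptor (dword1 = bytes 0..3, dword2 = bytes
// 4..7).  For code/data segments:
//   dword1: limit[15:0] in bits 15..0, base[15:0] in bits 31..16
//   dword2: base[23:16] in bits 7..0, AR byte in bits 15..8,
//           limit[19:16] in bits 19..16, AVL/L/D.B/G in bits 20..23,
//           base[31:24] in bits 31..24
// Gate descriptors instead keep the target selector in dword1[31:16]
// and the offset in dword1[15:0] (plus dword2[31:16] for 386 gates).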
void parse_descriptor(Bit32u dword1, Bit32u dword2, bx_descriptor_t *temp)
{
  Bit8u AR_byte;
  Bit32u limit;

  AR_byte = dword2 >> 8;
  temp->p = (AR_byte >> 7) & 0x1;
  temp->dpl = (AR_byte >> 5) & 0x3;
  temp->segment = (AR_byte >> 4) & 0x1;
  temp->type = (AR_byte & 0xf);
  temp->valid = 0; /* start out invalid */

  if (temp->segment) { /* data/code segment descriptors */
    limit = (dword1 & 0xffff) | (dword2 & 0x000F0000);

    temp->u.segment.base = (dword1 >> 16) | ((dword2 & 0xFF) << 16);
    temp->u.segment.g = (dword2 & 0x00800000) > 0;
    temp->u.segment.d_b = (dword2 & 0x00400000) > 0;
#if BX_SUPPORT_X86_64
    temp->u.segment.l = (dword2 & 0x00200000) > 0;
#endif
    temp->u.segment.avl = (dword2 & 0x00100000) > 0;
    temp->u.segment.base |= (dword2 & 0xFF000000);

    if (temp->u.segment.g)
      temp->u.segment.limit_scaled = (limit << 12) | 0xfff;
    else
      temp->u.segment.limit_scaled = limit;

    temp->valid = 1;
  }
  else { // system & gate segment descriptors
    switch (temp->type) {
      case BX_286_CALL_GATE:
      case BX_286_INTERRUPT_GATE:
      case BX_286_TRAP_GATE:
        // param count only used for call gate
        temp->u.gate.param_count = dword2 & 0x1f;
        temp->u.gate.dest_selector = dword1 >> 16;
        temp->u.gate.dest_offset = dword1 & 0xffff;
        temp->valid = 1;
        break;

      case BX_386_CALL_GATE:
      case BX_386_INTERRUPT_GATE:
      case BX_386_TRAP_GATE:
        // param count only used for call gate
        temp->u.gate.param_count = dword2 & 0x1f;
        temp->u.gate.dest_selector = dword1 >> 16;
        temp->u.gate.dest_offset = (dword2 & 0xffff0000) |
                                   (dword1 & 0x0000ffff);
        temp->valid = 1;
        break;

      case BX_TASK_GATE:
        temp->u.taskgate.tss_selector = dword1 >> 16;
        temp->valid = 1;
        break;

      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
        limit = (dword1 & 0xffff) | (dword2 & 0x000F0000);
        temp->u.segment.base = (dword1 >> 16) |
                               ((dword2 & 0xff) << 16) | (dword2 & 0xff000000);
        temp->u.segment.g = (dword2 & 0x00800000) > 0;
        temp->u.segment.d_b = (dword2 & 0x00400000) > 0;
        temp->u.segment.avl = (dword2 & 0x00100000) > 0;
        if (temp->u.segment.g)
          temp->u.segment.limit_scaled = (limit << 12) | 0xfff;
        else
          temp->u.segment.limit_scaled = limit;
        temp->valid = 1;
        break;

      default: // reserved
        temp->valid = 0;
        break;
    }
  }
}

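// Set the accessed bit the first time a segment is touched: the bit is
// ORed into the cached type field and descriptor byte 5 (the AR byte)
// is written back to the GDT or LDT in memory.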
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::touch_segment(bx_selector_t *selector, bx_descriptor_t *descriptor)
{
  if (! IS_SEGMENT_ACCESSED(descriptor->type)) {
    Bit8u AR_byte = get_ar_byte(descriptor);
    AR_byte |= 1;
    descriptor->type |= 1;

    if (selector->ti == 0) { /* GDT */
      system_write_byte(BX_CPU_THIS_PTR gdtr.base + selector->index*8 + 5, AR_byte);
    }
    else { /* LDT */
      system_write_byte(BX_CPU_THIS_PTR ldtr.cache.u.segment.base + selector->index*8 + 5, AR_byte);
    }
  }
}

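// Load SS from a selector/descriptor pair the caller has already
// validated (e.g. during a stack switch); the selector's RPL is forced
// to the supplied cpl before the registers are committed.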
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::load_ss(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cpl)
{
  // Add cpl to the selector value.
  selector->value = (BX_SELECTOR_RPL_MASK & selector->value) | cpl;

  if ((selector->value & BX_SELECTOR_RPL_MASK) != 0)
    touch_segment(selector, descriptor);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector = *selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache = *descriptor;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.rpl = cpl;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache;

  invalidate_stack_cache();
}

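// Fetch the 8-byte descriptor addressed by 'selector' from the GDT or
// LDT, raising 'exception_no' with the selector as error code when the
// index falls outside the table limit (the index*8 + 7 test requires
// the whole descriptor to fit).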
void BX_CPU_C::fetch_raw_descriptor(const bx_selector_t *selector,
        Bit32u *dword1, Bit32u *dword2, unsigned exception_no)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 7) > BX_CPU_THIS_PTR gdtr.limit) {
      BX_ERROR(("fetch_raw_descriptor: GDT: index (%x) %x > limit (%x)",
          index*8 + 7, index, BX_CPU_THIS_PTR gdtr.limit));
      exception(exception_no, selector->value & 0xfffc);
    }
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor: LDTR.valid=0"));
      exception(exception_no, selector->value & 0xfffc);
    }
    if ((index*8 + 7) > BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled) {
      BX_ERROR(("fetch_raw_descriptor: LDT: index (%x) %x > limit (%x)",
          index*8 + 7, index, BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled));
      exception(exception_no, selector->value & 0xfffc);
    }
    offset = BX_CPU_THIS_PTR ldtr.cache.u.segment.base + index*8;
  }

  raw_descriptor = system_read_qword(offset);

  *dword1 = GET32L(raw_descriptor);
  *dword2 = GET32H(raw_descriptor);
}

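// Non-faulting variant of fetch_raw_descriptor(): returns 0 instead of
// raising an exception when the descriptor cannot be fetched.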
bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::fetch_raw_descriptor2(const bx_selector_t *selector, Bit32u *dword1, Bit32u *dword2)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 7) > BX_CPU_THIS_PTR gdtr.limit)
      return 0;
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor2: LDTR.valid=0"));
      return 0;
    }
    if ((index*8 + 7) > BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled)
      return 0;
    offset = BX_CPU_THIS_PTR ldtr.cache.u.segment.base + index*8;
  }

  raw_descriptor = system_read_qword(offset);

  *dword1 = GET32L(raw_descriptor);
  *dword2 = GET32H(raw_descriptor);

  return 1;
}

#if BX_SUPPORT_X86_64
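// In long mode, system descriptors (LDT and TSS) are expanded to 16
// bytes: the third dword carries base[63:32] and the fourth is mostly
// reserved -- its would-be type/S bits (the 0x1F field tested below)
// must be zero, else #GP.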
void BX_CPU_C::fetch_raw_descriptor_64(const bx_selector_t *selector,
        Bit32u *dword1, Bit32u *dword2, Bit32u *dword3, unsigned exception_no)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor1, raw_descriptor2;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 15) > BX_CPU_THIS_PTR gdtr.limit) {
      BX_ERROR(("fetch_raw_descriptor64: GDT: index (%x) %x > limit (%x)",
          index*8 + 15, index, BX_CPU_THIS_PTR gdtr.limit));
      exception(exception_no, selector->value & 0xfffc);
    }
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor64: LDTR.valid=0"));
      exception(exception_no, selector->value & 0xfffc);
    }
    if ((index*8 + 15) > BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled) {
      BX_ERROR(("fetch_raw_descriptor64: LDT: index (%x) %x > limit (%x)",
          index*8 + 15, index, BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled));
      exception(exception_no, selector->value & 0xfffc);
    }
    offset = BX_CPU_THIS_PTR ldtr.cache.u.segment.base + index*8;
  }

  raw_descriptor1 = system_read_qword(offset);
  raw_descriptor2 = system_read_qword(offset + 8);

  if (raw_descriptor2 & BX_CONST64(0x00001F0000000000)) {
    BX_ERROR(("fetch_raw_descriptor64: extended attributes DWORD4 TYPE != 0"));
    exception(BX_GP_EXCEPTION, selector->value & 0xfffc);
  }

  *dword1 = GET32L(raw_descriptor1);
  *dword2 = GET32H(raw_descriptor1);
  *dword3 = GET32L(raw_descriptor2);
}

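// Non-faulting variant of fetch_raw_descriptor_64(): returns 0 instead
// of raising an exception when the 16-byte descriptor cannot be fetched.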
bool BX_CPU_C::fetch_raw_descriptor2_64(const bx_selector_t *selector, Bit32u *dword1, Bit32u *dword2, Bit32u *dword3)
{
  Bit32u index = selector->index;
  bx_address offset;
  Bit64u raw_descriptor1, raw_descriptor2;

  if (selector->ti == 0) { /* GDT */
    if ((index*8 + 15) > BX_CPU_THIS_PTR gdtr.limit) {
      BX_ERROR(("fetch_raw_descriptor2_64: GDT: index (%x) %x > limit (%x)",
          index*8 + 15, index, BX_CPU_THIS_PTR gdtr.limit));
      return 0;
    }
    offset = BX_CPU_THIS_PTR gdtr.base + index*8;
  }
  else { /* LDT */
    if (BX_CPU_THIS_PTR ldtr.cache.valid==0) {
      BX_ERROR(("fetch_raw_descriptor2_64: LDTR.valid=0"));
      return 0;
    }
    if ((index*8 + 15) > BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled) {
      BX_ERROR(("fetch_raw_descriptor2_64: LDT: index (%x) %x > limit (%x)",
          index*8 + 15, index, BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled));
      return 0;
    }
    offset = BX_CPU_THIS_PTR ldtr.cache.u.segment.base + index*8;
  }

  raw_descriptor1 = system_read_qword(offset);
  raw_descriptor2 = system_read_qword(offset + 8);

  if (raw_descriptor2 & BX_CONST64(0x00001F0000000000)) {
    BX_ERROR(("fetch_raw_descriptor2_64: extended attributes DWORD4 TYPE != 0"));
    return 0;
  }

  *dword1 = GET32L(raw_descriptor1);
  *dword2 = GET32H(raw_descriptor1);
  *dword3 = GET32L(raw_descriptor2);

  return 1;
}
#endif