/////////////////////////////////////////////////////////////////////////
// $Id: call_far.cc 13699 2019-12-20 07:42:07Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005-2019 Stanislav Shwartsman
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////
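
// This file implements the protected-mode and long-mode semantics of far
// CALL: direct calls through a code segment descriptor, calls through
// 286/386 call gates (including privilege transitions and parameter
// copying), task switches via an available TSS or a task gate, 64-bit
// call gates, and the CET shadow stack bookkeeping for each path.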

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor;

  /* new cs selector must not be null, else #GP(0) */
  if ((cs_raw & 0xfffc) == 0) {
    BX_DEBUG(("call_protected: CS selector null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  parse_selector(cs_raw, &cs_selector);
  // check new CS selector index within its descriptor limits,
  // else #GP(new CS selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // examine AR byte of selected descriptor for various legal values
  if (cs_descriptor.valid==0) {
    BX_ERROR(("call_protected: invalid CS descriptor"));
    exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
  }

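  // Far call directly through a code segment descriptor: no privilege
  // change is possible on this path, so only the return address is pushed;
  // check_cs() validates the descriptor type and privilege.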
  if (cs_descriptor.segment)   // normal segment
  {
    check_cs(&cs_descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw), CPL);

#if BX_SUPPORT_CET
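    // remember the current CS and the linear return address; together they
    // form the record pushed on the shadow stack after the far branch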
    bx_address temp_LIP = get_laddr(BX_SEG_REG_CS, RIP);
    Bit16u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
#endif

#if BX_SUPPORT_X86_64
    if (long_mode() && cs_descriptor.u.segment.l) {
      Bit64u temp_rsp = RSP;
      // moving to long mode, push return address onto 64-bit stack
      if (i->os64L()) {
        write_new_stack_qword(temp_rsp - 8, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_qword(temp_rsp - 16, cs_descriptor.dpl, RIP);
        temp_rsp -= 16;
      }
      else if (i->os32L()) {
        write_new_stack_dword(temp_rsp - 4, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_dword(temp_rsp - 8, cs_descriptor.dpl, EIP);
        temp_rsp -= 8;
      }
      else {
        write_new_stack_word(temp_rsp - 2, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_word(temp_rsp - 4, cs_descriptor.dpl, IP);
        temp_rsp -= 4;
      }

      // load code segment descriptor into CS cache
      // load CS with new code segment selector
      // set RPL of CS to CPL
      branch_far(&cs_selector, &cs_descriptor, disp, CPL);
      RSP = temp_rsp;
    }
    else
#endif
    {
      Bit32u temp_RSP;

      // moving to legacy mode, push return address onto 32-bit stack
      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
        temp_RSP = ESP;
      else
        temp_RSP = SP;

#if BX_SUPPORT_X86_64
      if (i->os64L()) {
        write_new_stack_qword(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 8, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_qword(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 16, cs_descriptor.dpl, RIP);
        temp_RSP -= 16;
      }
      else
#endif
      if (i->os32L()) {
        write_new_stack_dword(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 4, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_dword(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 8, cs_descriptor.dpl, EIP);
        temp_RSP -= 8;
      }
      else {
        write_new_stack_word(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 2, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_word(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 4, cs_descriptor.dpl, IP);
        temp_RSP -= 4;
      }

      // load code segment descriptor into CS cache
      // load CS with new code segment selector
      // set RPL of CS to CPL
      branch_far(&cs_selector, &cs_descriptor, disp, CPL);

      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
        ESP = (Bit32u) temp_RSP;
      else
        SP = (Bit16u) temp_RSP;
    }

#if BX_SUPPORT_CET
    if (ShadowStackEnabled(CPL)) {
      call_far_shadow_stack_push(old_CS, temp_LIP, SSP);
    }
    track_indirect(CPL);
#endif

    return;
  }
  else { // gate & special segment
    bx_descriptor_t gate_descriptor = cs_descriptor;
    bx_selector_t gate_selector = cs_selector;

    // descriptor DPL must be >= CPL else #GP(gate selector)
    if (gate_descriptor.dpl < CPL) {
      BX_ERROR(("call_protected: descriptor.dpl < CPL"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
    }

    // descriptor DPL must be >= gate selector RPL else #GP(gate selector)
    if (gate_descriptor.dpl < gate_selector.rpl) {
      BX_ERROR(("call_protected: descriptor.dpl < selector.rpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
    }

#if BX_SUPPORT_X86_64
    if (long_mode()) {
      // call gate type is higher priority than non-present bit check
      if (gate_descriptor.type != BX_386_CALL_GATE) {
        BX_ERROR(("call_protected: gate type %u unsupported in long mode", (unsigned) gate_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
      }
      // gate descriptor must be present else #NP(gate selector)
      if (! IS_PRESENT(gate_descriptor)) {
        BX_ERROR(("call_protected: call gate not present"));
        exception(BX_NP_EXCEPTION, cs_raw & 0xfffc);
      }

      call_gate64(&gate_selector);
      return;
    }
#endif

    switch (gate_descriptor.type) {
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
        if (gate_descriptor.type==BX_SYS_SEGMENT_AVAIL_286_TSS)
          BX_DEBUG(("call_protected: 16bit available TSS"));
        else
          BX_DEBUG(("call_protected: 32bit available TSS"));

        if (gate_descriptor.valid==0 || gate_selector.ti) {
          BX_ERROR(("call_protected: call bad TSS selector !"));
          exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
        }

        // TSS must be present, else #NP(TSS selector)
        if (! IS_PRESENT(gate_descriptor)) {
          BX_ERROR(("call_protected: call not present TSS !"));
          exception(BX_NP_EXCEPTION, cs_raw & 0xfffc);
        }

        // SWITCH_TASKS _without_ nesting to TSS
        task_switch(i, &gate_selector, &gate_descriptor,
          BX_TASK_FROM_CALL, dword1, dword2);
        return;

      case BX_TASK_GATE:
        task_gate(i, &gate_selector, &gate_descriptor, BX_TASK_FROM_CALL);
        return;

      case BX_286_CALL_GATE:
      case BX_386_CALL_GATE:
        // gate descriptor must be present else #NP(gate selector)
        if (! IS_PRESENT(gate_descriptor)) {
          BX_ERROR(("call_protected: gate not present"));
          exception(BX_NP_EXCEPTION, cs_raw & 0xfffc);
        }
        call_gate(&gate_descriptor);
        return;

      default: // can't get here
        BX_ERROR(("call_protected(): gate.type(%u) unsupported", (unsigned) gate_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
    }
  }
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::call_gate(bx_descriptor_t *gate_descriptor)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor;

  // examine code segment selector in call gate descriptor
  BX_DEBUG(("call_gate: call gate"));

  Bit16u dest_selector = gate_descriptor->u.gate.dest_selector;
  Bit32u new_EIP = gate_descriptor->u.gate.dest_offset;

  // selector must not be null else #GP(0)
  if ((dest_selector & 0xfffc) == 0) {
    BX_ERROR(("call_gate: selector in gate null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  parse_selector(dest_selector, &cs_selector);
  // selector must be within its descriptor table limits,
  // else #GP(code segment selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // AR byte of selected descriptor must indicate code segment,
  // else #GP(code segment selector)
  // DPL of selected descriptor must be <= CPL,
  // else #GP(code segment selector)
  if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
      IS_DATA_SEGMENT(cs_descriptor.type) || cs_descriptor.dpl > CPL)
  {
    BX_ERROR(("call_gate: selected descriptor is not code"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc);
  }

  // code segment must be present else #NP(selector)
  if (! IS_PRESENT(cs_descriptor)) {
    BX_ERROR(("call_gate: code segment not present !"));
    exception(BX_NP_EXCEPTION, dest_selector & 0xfffc);
  }

  // CALL GATE TO MORE PRIVILEGE
  // if non-conforming code segment and DPL < CPL then
  if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && (cs_descriptor.dpl < CPL))
  {
    Bit16u SS_for_cpl_x;
    Bit32u ESP_for_cpl_x;
    bx_selector_t   ss_selector;
    bx_descriptor_t ss_descriptor;
    Bit16u return_SS, return_CS;
    Bit32u return_ESP, return_EIP;

    BX_DEBUG(("CALL GATE TO MORE PRIVILEGE LEVEL"));

    // get new SS selector for new privilege level from TSS
    get_SS_ESP_from_TSS(cs_descriptor.dpl, &SS_for_cpl_x, &ESP_for_cpl_x);

    // check selector & descriptor for new SS:
    // selector must not be null, else #TS(0)
    if ((SS_for_cpl_x & 0xfffc) == 0) {
      BX_ERROR(("call_gate: new SS null"));
      exception(BX_TS_EXCEPTION, 0);
    }

    // selector index must be within its descriptor table limits,
    // else #TS(SS selector)
    parse_selector(SS_for_cpl_x, &ss_selector);
    fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_TS_EXCEPTION);
    parse_descriptor(dword1, dword2, &ss_descriptor);

    // selector's RPL must equal DPL of code segment,
    // else #TS(SS selector)
    if (ss_selector.rpl != cs_descriptor.dpl) {
      BX_ERROR(("call_gate: SS selector.rpl != CS descr.dpl"));
      exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
    }

    // stack segment DPL must equal DPL of code segment,
    // else #TS(SS selector)
    if (ss_descriptor.dpl != cs_descriptor.dpl) {
      BX_ERROR(("call_gate: SS descr.dpl != CS descr.dpl"));
      exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
    }

    // descriptor must indicate writable data segment,
    // else #TS(SS selector)
    if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
        IS_CODE_SEGMENT(ss_descriptor.type) || !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
    {
      BX_ERROR(("call_gate: ss descriptor is not writable data seg"));
      exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
    }

    // segment must be present, else #SS(SS selector)
    if (! IS_PRESENT(ss_descriptor)) {
      BX_ERROR(("call_gate: ss descriptor not present"));
      exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc);
    }

    // get word count from call gate, mask to 5 bits
    unsigned param_count = gate_descriptor->u.gate.param_count & 0x1f;
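    // (for a 286 call gate the count is in 16-bit words, for a 386 call
    // gate in 32-bit dwords, as the copy loops below reflect)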

    // save return SS:eSP to be pushed on new stack
    return_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      return_ESP = ESP;
    else
      return_ESP = SP;

    // save return CS:eIP to be pushed on new stack
    return_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
      return_EIP = EIP;
    else
      return_EIP = IP;

    // Prepare new stack segment
    bx_segment_reg_t new_stack;
    new_stack.selector = ss_selector;
    new_stack.cache = ss_descriptor;
    new_stack.selector.rpl = cs_descriptor.dpl;
    // add cpl to the selector value
    new_stack.selector.value = (0xfffc & new_stack.selector.value) |
      new_stack.selector.rpl;

    /* load new SS:SP value from TSS */
    if (ss_descriptor.u.segment.d_b) {
      Bit32u temp_ESP = ESP_for_cpl_x;

      // push pointer of old stack onto new stack
      if (gate_descriptor->type==BX_386_CALL_GATE) {
        write_new_stack_dword(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_SS);
        write_new_stack_dword(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_ESP);
        temp_ESP -= 8;

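        // copy param_count dwords of parameters from the caller's stack
        // onto the new stack, preserving their order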
        for (unsigned n=param_count; n>0; n--) {
          temp_ESP -= 4;
          Bit32u param = stack_read_dword(return_ESP + (n-1)*4);
          write_new_stack_dword(&new_stack, temp_ESP, cs_descriptor.dpl, param);
        }
        // push return address onto new stack
        write_new_stack_dword(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_CS);
        write_new_stack_dword(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_EIP);
        temp_ESP -= 8;
      }
      else {
        write_new_stack_word(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_SS);
        write_new_stack_word(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_ESP);
        temp_ESP -= 4;

        for (unsigned n=param_count; n>0; n--) {
          temp_ESP -= 2;
          Bit16u param = stack_read_word(return_ESP + (n-1)*2);
          write_new_stack_word(&new_stack, temp_ESP, cs_descriptor.dpl, param);
        }
        // push return address onto new stack
        write_new_stack_word(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_CS);
        write_new_stack_word(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_EIP);
        temp_ESP -= 4;
      }

      ESP = temp_ESP;
    }
    else {
      Bit16u temp_SP = (Bit16u) ESP_for_cpl_x;

      // push pointer of old stack onto new stack
      if (gate_descriptor->type==BX_386_CALL_GATE) {
        write_new_stack_dword(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, return_SS);
        write_new_stack_dword(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, return_ESP);
        temp_SP -= 8;

        for (unsigned n=param_count; n>0; n--) {
          temp_SP -= 4;
          Bit32u param = stack_read_dword(return_ESP + (n-1)*4);
          write_new_stack_dword(&new_stack, temp_SP, cs_descriptor.dpl, param);
        }
        // push return address onto new stack
        write_new_stack_dword(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, return_CS);
        write_new_stack_dword(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, return_EIP);
        temp_SP -= 8;
      }
      else {
        write_new_stack_word(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, return_SS);
        write_new_stack_word(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) return_ESP);
        temp_SP -= 4;

        for (unsigned n=param_count; n>0; n--) {
          temp_SP -= 2;
          Bit16u param = stack_read_word(return_ESP + (n-1)*2);
          write_new_stack_word(&new_stack, temp_SP, cs_descriptor.dpl, param);
        }
        // push return address onto new stack
        write_new_stack_word(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, return_CS);
        write_new_stack_word(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) return_EIP);
        temp_SP -= 4;
      }

      SP = temp_SP;
    }

    // new eIP must be in code segment limit else #GP(0)
    if (new_EIP > cs_descriptor.u.segment.limit_scaled) {
      BX_ERROR(("call_gate: EIP not within CS limits"));
      exception(BX_GP_EXCEPTION, 0);
    }

#if BX_SUPPORT_CET
    bx_address temp_LIP = get_laddr(BX_SEG_REG_CS, return_EIP);
    unsigned old_SS_DPL = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl;
    unsigned old_CPL = CPL;
#endif

    /* load SS descriptor */
    load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);

    /* load new CS:IP value from gate */
    /* load CS descriptor */
    /* set CPL to stack segment DPL */
    /* set RPL of CS to CPL */
    load_cs(&cs_selector, &cs_descriptor, cs_descriptor.dpl);

    EIP = new_EIP;

#if BX_SUPPORT_CET
    if (ShadowStackEnabled(old_CPL)) {
      if (old_CPL == 3)
        BX_CPU_THIS_PTR msr.ia32_pl_ssp[3] = SSP;
    }

    if (ShadowStackEnabled(CPL)) {
      bx_address old_SSP = SSP;
      shadow_stack_switch(BX_CPU_THIS_PTR msr.ia32_pl_ssp[CPL]);
      if (old_SS_DPL != 3)
        call_far_shadow_stack_push(return_CS, temp_LIP, old_SSP);
    }
    track_indirect(CPL);
#endif
  }
  else   // CALL GATE TO SAME PRIVILEGE
  {
    BX_DEBUG(("CALL GATE TO SAME PRIVILEGE"));

    if (gate_descriptor->type == BX_386_CALL_GATE) {
      // call gate 32bit, push return address onto stack
      push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_32(EIP);
    }
    else {
      // call gate 16bit, push return address onto stack
      push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_16(IP);
    }

#if BX_SUPPORT_CET
    Bit32u temp_LIP = get_segment_base(BX_SEG_REG_CS) + ((gate_descriptor->type == BX_386_CALL_GATE) ? EIP : IP);
    Bit16u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
#endif

    // load CS:EIP from gate
    // load code segment descriptor into CS register
    // set RPL of CS to CPL
    branch_far(&cs_selector, &cs_descriptor, new_EIP, CPL);

#if BX_SUPPORT_CET
    if (ShadowStackEnabled(CPL)) {
      call_far_shadow_stack_push(old_CS, temp_LIP, SSP);
    }
    track_indirect(CPL);
#endif
  }
}

#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1) BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2, dword3;
  bx_descriptor_t cs_descriptor;
  bx_descriptor_t gate_descriptor;

  // examine code segment selector in call gate descriptor
  BX_DEBUG(("call_gate64: CALL 64bit call gate"));

  fetch_raw_descriptor_64(gate_selector, &dword1, &dword2, &dword3, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &gate_descriptor);

  Bit16u dest_selector = gate_descriptor.u.gate.dest_selector;
  // selector must not be null else #GP(0)
  if ((dest_selector & 0xfffc) == 0) {
    BX_ERROR(("call_gate64: selector in gate null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  parse_selector(dest_selector, &cs_selector);
  // selector must be within its descriptor table limits,
  // else #GP(code segment selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // find the RIP in the gate_descriptor
  Bit64u new_RIP = gate_descriptor.u.gate.dest_offset;
  new_RIP |= ((Bit64u)dword3 << 32);

  // AR byte of selected descriptor must indicate code segment,
  // else #GP(code segment selector)
  // DPL of selected descriptor must be <= CPL,
  // else #GP(code segment selector)
  if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
      IS_DATA_SEGMENT(cs_descriptor.type) ||
      cs_descriptor.dpl > CPL)
  {
    BX_ERROR(("call_gate64: selected descriptor is not code"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc);
  }

  // In long mode, only 64-bit call gates are allowed, and they must point
  // to 64-bit code segments, else #GP(selector)
  if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_descriptor.u.segment.d_b)
  {
    BX_ERROR(("call_gate64: not 64-bit code segment in call gate 64"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc);
  }

  // code segment must be present else #NP(selector)
  if (! IS_PRESENT(cs_descriptor)) {
    BX_ERROR(("call_gate64: code segment not present !"));
    exception(BX_NP_EXCEPTION, dest_selector & 0xfffc);
  }

  Bit64u old_CS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  Bit64u old_RIP = RIP;

#if BX_SUPPORT_CET
  bx_address temp_LIP = get_laddr(BX_SEG_REG_CS, RIP);
  unsigned old_SS_DPL = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl;
  unsigned old_CPL = CPL;
#endif

  // CALL GATE TO MORE PRIVILEGE
  // if non-conforming code segment and DPL < CPL then
  if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && (cs_descriptor.dpl < CPL))
  {
    BX_DEBUG(("CALL GATE64 TO MORE PRIVILEGE LEVEL"));

    // get new RSP for new privilege level from TSS
    Bit64u RSP_for_cpl_x = get_RSP_from_TSS(cs_descriptor.dpl);
    Bit64u old_SS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    Bit64u old_RSP = RSP;

    // push old stack long pointer onto new stack
    write_new_stack_qword(RSP_for_cpl_x -  8, cs_descriptor.dpl, old_SS);
    write_new_stack_qword(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
    // push long pointer to return address onto new stack
    write_new_stack_qword(RSP_for_cpl_x - 24, cs_descriptor.dpl, old_CS);
    write_new_stack_qword(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_RIP);
    RSP_for_cpl_x -= 32;
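    // (64-bit call gates have no parameter count field; unlike legacy
    // call gates, no parameters are copied from the old stack)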

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far(&cs_selector, &cs_descriptor, new_RIP, cs_descriptor.dpl);

    // set up null SS descriptor: in long mode an inner-privilege far call
    // loads SS with a null selector whose RPL is the new CPL
    load_null_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], cs_descriptor.dpl);

    RSP = RSP_for_cpl_x;

#if BX_SUPPORT_CET
    if (ShadowStackEnabled(old_CPL)) {
      if (old_CPL == 3)
        BX_CPU_THIS_PTR msr.ia32_pl_ssp[3] = SSP;
    }
    if (ShadowStackEnabled(CPL)) {
      bx_address old_SSP = SSP;
      shadow_stack_switch(BX_CPU_THIS_PTR msr.ia32_pl_ssp[CPL]);
      if (old_SS_DPL != 3)
        call_far_shadow_stack_push(old_CS, temp_LIP, old_SSP);
    }
    track_indirect(CPL);
#endif
  }
  else
  {
    BX_DEBUG(("CALL GATE64 TO SAME PRIVILEGE"));

    // push return address onto the 64-bit stack (the target is guaranteed
    // to be a 64-bit code segment)
    write_new_stack_qword(RSP -  8, CPL, old_CS);
    write_new_stack_qword(RSP - 16, CPL, old_RIP);

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far(&cs_selector, &cs_descriptor, new_RIP, CPL);

    RSP -= 16;

#if BX_SUPPORT_CET
    if (ShadowStackEnabled(CPL)) {
      call_far_shadow_stack_push(old_CS, temp_LIP, SSP);
    }
    track_indirect(CPL);
#endif
  }
}

#if BX_SUPPORT_CET
void BX_CPP_AttrRegparmN(1) BX_CPU_C::shadow_stack_switch(bx_address new_SSP)
{
  SSP = new_SSP;

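  // the new SSP (taken from IA32_PL<n>_SSP) must be 8-byte aligned, must
  // fit in 32 bits outside 64-bit mode, and its supervisor shadow stack
  // token must be acquired atomically before the stack can be used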
  if (SSP & 0x7) {
    BX_ERROR(("shadow_stack_switch: SSP is not aligned to 8 byte boundary"));
    exception(BX_GP_EXCEPTION, 0);
  }
  if (!long64_mode() && GET32H(SSP) != 0) {
    BX_ERROR(("shadow_stack_switch: 64-bit SSP not in 64-bit mode"));
    exception(BX_GP_EXCEPTION, 0);
  }
  if (!shadow_stack_atomic_set_busy(SSP, CPL)) {
    BX_ERROR(("shadow_stack_switch: failure to set busy bit"));
    exception(BX_GP_EXCEPTION, 0);
  }
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::call_far_shadow_stack_push(Bit16u cs, bx_address lip, bx_address old_ssp)
{
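  // a far CALL records CS, the linear return address (LIP) and the previous
  // SSP on the shadow stack; if SSP is only 4-byte aligned, write a zero
  // dword first so the three qword pushes stay 8-byte aligned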
  if (SSP & 0x7) {
    shadow_stack_write_dword(SSP-4, CPL, 0);
    SSP &= ~BX_CONST64(0x7);
  }

  shadow_stack_push_64(cs);
  shadow_stack_push_64(lip);
  shadow_stack_push_64(old_ssp);
}
#endif

#endif