/////////////////////////////////////////////////////////////////////////
// $Id: iret.cc 13699 2019-12-20 07:42:07Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005-2019 Stanislav Shwartsman
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

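// Protected-mode IRET takes one of three paths:
//  1) EFLAGS.NT=1: return from a nested task, via the back link selector
//     stored in the current TSS (a task switch, handled below).
//  2) EFLAGS.NT=0 and return CS.RPL == CPL: pop (E)IP, CS and (E)FLAGS
//     from the stack and resume at the same privilege level.
//  3) EFLAGS.NT=0 and return CS.RPL > CPL: additionally pop SS:eSP and
//     return to an outer (less privileged) level. A 32-bit flags image
//     with VM=1, popped at CPL=0, instead resumes virtual-8086 mode.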
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::iret_protected(bxInstruction_c *i)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor, ss_descriptor;

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    long_iret(i);
    return;
  }
#endif

  if (BX_CPU_THIS_PTR get_NT()) /* NT = 1: RETURN FROM NESTED TASK */
  {
    /* v8086-mode IRET is handled before we ever get here, so the VM
       flag must be clear on this path */
    Bit16u raw_link_selector;
    bx_selector_t link_selector;
    bx_descriptor_t tss_descriptor;

    if (BX_CPU_THIS_PTR get_VM())
      BX_PANIC(("iret_protected: VM shouldn't be set here !"));

    BX_DEBUG(("IRET: nested task return"));

    if (BX_CPU_THIS_PTR tr.cache.valid==0)
      BX_PANIC(("IRET: TR not valid"));

    // examine back link selector in TSS addressed by current TR
    raw_link_selector = system_read_word(BX_CPU_THIS_PTR tr.cache.u.segment.base);

    // must specify global, else #TS(new TSS selector)
    parse_selector(raw_link_selector, &link_selector);

    if (link_selector.ti) {
      BX_ERROR(("iret: link selector.ti=1"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc);
    }

    // index must be within GDT limits, else #TS(new TSS selector)
    fetch_raw_descriptor(&link_selector, &dword1, &dword2, BX_TS_EXCEPTION);

    // AR byte must specify TSS, else #TS(new TSS selector)
    // new TSS must be busy, else #TS(new TSS selector)
    parse_descriptor(dword1, dword2, &tss_descriptor);
    if (tss_descriptor.valid==0 || tss_descriptor.segment) {
      BX_ERROR(("iret: TSS selector points to bad TSS"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc);
    }
    if (tss_descriptor.type != BX_SYS_SEGMENT_BUSY_286_TSS &&
        tss_descriptor.type != BX_SYS_SEGMENT_BUSY_386_TSS)
    {
      BX_ERROR(("iret: TSS selector points to bad TSS"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc);
    }

    // TSS must be present, else #NP(new TSS selector)
    if (! IS_PRESENT(tss_descriptor)) {
      BX_ERROR(("iret: task descriptor.p == 0"));
      exception(BX_NP_EXCEPTION, raw_link_selector & 0xfffc);
    }

    // switch tasks (without nesting) to TSS specified by back link selector
    task_switch(i, &link_selector, &tss_descriptor,
                BX_TASK_FROM_IRET, dword1, dword2);
    return;
  }

  /* NT = 0: INTERRUPT RETURN ON STACK or STACK_RETURN_TO_V86 */
  unsigned top_nbytes_same;
  Bit32u new_eip = 0, new_esp, temp_ESP, new_eflags = 0;

  /* 16bit opsize  |   32bit opsize
   * ==============================
   * SS     eSP+8  |   SS     eSP+16
   * SP     eSP+6  |   ESP    eSP+12
   * -------------------------------
   * FLAGS  eSP+4  |   EFLAGS eSP+8
   * CS     eSP+2  |   CS     eSP+4
   * IP     eSP+0  |   EIP    eSP+0
   */

  if (i->os32L()) {
    top_nbytes_same = 12;
  }
  else {
    top_nbytes_same = 6;
  }

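  // the SS.B (default size) bit selects ESP or SP as the active stack pointer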
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
    temp_ESP = SP;

  if (i->os32L()) {
    new_eflags      =          stack_read_dword(temp_ESP + 8);
    raw_cs_selector = (Bit16u) stack_read_dword(temp_ESP + 4);
    new_eip         =          stack_read_dword(temp_ESP + 0);

    // if VM=1 in flags image on stack then STACK_RETURN_TO_V86
    if (new_eflags & EFlagsVMMask) {
      if (CPL == 0) {
        stack_return_to_v86(new_eip, raw_cs_selector, new_eflags);
        return;
      }
      else BX_INFO(("iret: VM set on stack, CPL!=0"));
    }
  }
  else {
    new_eflags      = stack_read_word(temp_ESP + 4);
    raw_cs_selector = stack_read_word(temp_ESP + 2);
    new_eip         = stack_read_word(temp_ESP + 0);
  }

  parse_selector(raw_cs_selector, &cs_selector);

  // return CS selector must be non-null, else #GP(0)
  if ((raw_cs_selector & 0xfffc) == 0) {
    BX_ERROR(("iret: return CS selector null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // selector index must be within descriptor table limits,
  // else #GP(return selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // return CS selector RPL must be >= CPL, else #GP(return selector)
  if (cs_selector.rpl < CPL) {
    BX_ERROR(("iret: return selector RPL < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc);
  }

  // check code-segment descriptor
  check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);

  if (cs_selector.rpl == CPL) {

    BX_DEBUG(("INTERRUPT RETURN TO SAME PRIVILEGE LEVEL"));

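    // CET: verify the return CS:EIP against the record on the current
    // shadow stack and restore SSP from it (#CP on mismatch)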
#if BX_SUPPORT_CET
    if (ShadowStackEnabled(CPL)) {
      SSP = shadow_stack_restore(raw_cs_selector, cs_descriptor, new_eip);
    }
#endif

    /* load CS-cache with new code segment descriptor */
    branch_far(&cs_selector, &cs_descriptor, new_eip, cs_selector.rpl);

    /* top 6/12 bytes on stack must be within limits, else #SS(0) */
    /* satisfied above */
    if (i->os32L()) {
      // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
      Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                              EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
      changeMask |= (EFlagsIDMask | EFlagsACMask); // ID/AC
#endif
      if (CPL <= BX_CPU_THIS_PTR get_IOPL())
        changeMask |= EFlagsIFMask;
      if (CPL == 0)
        changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

      // IF only changed if (CPL <= EFLAGS.IOPL)
      // VIF, VIP, IOPL only changed if CPL == 0
      // VM unaffected
      writeEFlags(new_eflags, changeMask);
    }
    else {
      /* load flags with third word on stack */
      write_flags((Bit16u) new_eflags, CPL==0, CPL<=BX_CPU_THIS_PTR get_IOPL());
    }

    /* increment stack by 6/12 */
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += top_nbytes_same;
    else
      SP += top_nbytes_same;
  }
  else {

    BX_DEBUG(("INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL"));

    /* 16bit opsize  |   32bit opsize
     * ==============================
     * SS     eSP+8  |   SS     eSP+16
     * SP     eSP+6  |   ESP    eSP+12
     * FLAGS  eSP+4  |   EFLAGS eSP+8
     * CS     eSP+2  |   CS     eSP+4
     * IP     eSP+0  |   EIP    eSP+0
     */

    /* examine return SS selector and associated descriptor */
    if (i->os32L()) {
      raw_ss_selector = stack_read_word(temp_ESP + 16);
    }
    else {
      raw_ss_selector = stack_read_word(temp_ESP + 8);
    }

    /* selector must be non-null, else #GP(0) */
    if ((raw_ss_selector & 0xfffc) == 0) {
      BX_ERROR(("iret: SS selector null"));
      exception(BX_GP_EXCEPTION, 0);
    }

    parse_selector(raw_ss_selector, &ss_selector);

    /* selector RPL must = RPL of return CS selector,
     * else #GP(SS selector) */
    if (ss_selector.rpl != cs_selector.rpl) {
      BX_ERROR(("iret: SS.rpl != CS.rpl"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
    }

    /* selector index must be within its descriptor table limits,
     * else #GP(SS selector) */
    fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

    parse_descriptor(dword1, dword2, &ss_descriptor);

    /* AR byte must indicate a writable data segment,
     * else #GP(SS selector) */
    if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
         IS_CODE_SEGMENT(ss_descriptor.type) ||
        !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
    {
      BX_ERROR(("iret: SS AR byte not writable or code segment"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
    }

    /* stack segment DPL must equal the RPL of the return CS selector,
     * else #GP(SS selector) */
    if (ss_descriptor.dpl != cs_selector.rpl) {
      BX_ERROR(("iret: SS.dpl != CS selector RPL"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
    }

    /* SS must be present, else #NP(SS selector) */
    if (! IS_PRESENT(ss_descriptor)) {
      BX_ERROR(("iret: SS not present!"));
      exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc);
    }

    if (i->os32L()) {
      new_esp = stack_read_dword(temp_ESP + 12);
    }
    else {
      new_esp = stack_read_word(temp_ESP + 6);
    }

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                            EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
    changeMask |= (EFlagsIDMask | EFlagsACMask); // ID/AC
#endif
    if (CPL <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (CPL == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

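    // CET: when returning to ring 3 the new SSP comes from the
    // IA32_PL3_SSP MSR; otherwise it is restored from the records on
    // the current shadow stack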
#if BX_SUPPORT_CET
    unsigned prev_cpl = CPL;
    bx_address new_SSP = BX_CPU_THIS_PTR msr.ia32_pl_ssp[3];
    if (ShadowStackEnabled(CPL)) {
      if (SSP & 0x7) {
        BX_ERROR(("iret_protected: SSP is not 8-byte aligned"));
        exception(BX_CP_EXCEPTION, BX_CP_FAR_RET_IRET);
      }
      if (cs_selector.rpl != 3) {
        new_SSP = shadow_stack_restore(raw_cs_selector, cs_descriptor, new_eip);
      }
    }
#endif

    /* load CS:EIP from stack */
    /* load the CS-cache with CS descriptor */
    /* set CPL to the RPL of the return CS selector */
    branch_far(&cs_selector, &cs_descriptor, new_eip, cs_selector.rpl);

    // IF only changed if (prev_CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if prev_CPL == 0
    // VM unaffected
    writeEFlags(new_eflags, changeMask);

    // load SS:eSP from stack
    // load the SS-cache with SS descriptor
    load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    if (ss_descriptor.u.segment.d_b)
      ESP = new_esp;
    else
      SP = new_esp;

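    // CET: switch SSP to the new privilege level's shadow stack, then
    // clear the busy token on the previous level's shadow stack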
#if BX_SUPPORT_CET
    bx_address old_SSP = SSP;
    if (ShadowStackEnabled(CPL)) {
      if (GET32H(new_SSP) != 0) {
        BX_ERROR(("iret_protected: 64-bit SSP in legacy mode"));
        exception(BX_GP_EXCEPTION, 0);
      }
      SSP = new_SSP;
    }
    if (ShadowStackEnabled(prev_cpl)) {
      shadow_stack_atomic_clear_busy(old_SSP, prev_cpl);
    }
#endif

    validate_seg_regs();
  }
}

#if BX_SUPPORT_X86_64
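// Long-mode IRET (EFER.LMA=1). NT must be clear, since hardware task
// switching does not exist in long mode. The stack image may be 64-, 32-
// or 16-bit wide, and a null SS selector is legal when returning to a
// 64-bit code segment at CPL < 3.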
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::long_iret(bxInstruction_c *i)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor, ss_descriptor;
  Bit32u new_eflags;
  Bit64u new_rip, new_rsp, temp_RSP;

  BX_DEBUG(("LONG MODE IRET"));

  if (BX_CPU_THIS_PTR get_NT()) {
    BX_ERROR(("iret64: return from nested task in x86-64 mode !"));
    exception(BX_GP_EXCEPTION, 0);
  }

  /* 64bit opsize
   * ============
   * SS     eSP+32
   * ESP    eSP+24
   * -------------
   * EFLAGS eSP+16
   * CS     eSP+8
   * EIP    eSP+0
   */

  if (long64_mode()) temp_RSP = RSP;
  else {
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) temp_RSP = ESP;
    else temp_RSP = SP;
  }

  unsigned top_nbytes_same = 0; /* stop compiler warnings */

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    new_eflags      = (Bit32u) stack_read_qword(temp_RSP + 16);
    raw_cs_selector = (Bit16u) stack_read_qword(temp_RSP + 8);
    new_rip         =          stack_read_qword(temp_RSP + 0);
    top_nbytes_same = 24;
  }
  else
#endif
  if (i->os32L()) {
    new_eflags      =          stack_read_dword(temp_RSP + 8);
    raw_cs_selector = (Bit16u) stack_read_dword(temp_RSP + 4);
    new_rip         = (Bit64u) stack_read_dword(temp_RSP + 0);
    top_nbytes_same = 12;
  }
  else {
    new_eflags      =          stack_read_word(temp_RSP + 4);
    raw_cs_selector =          stack_read_word(temp_RSP + 2);
    new_rip         = (Bit64u) stack_read_word(temp_RSP + 0);
    top_nbytes_same = 6;
  }

  // ignore VM flag in long mode
  new_eflags &= ~EFlagsVMMask;

  parse_selector(raw_cs_selector, &cs_selector);

  // return CS selector must be non-null, else #GP(0)
  if ((raw_cs_selector & 0xfffc) == 0) {
    BX_ERROR(("iret64: return CS selector null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // selector index must be within descriptor table limits,
  // else #GP(return selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // return CS selector RPL must be >= CPL, else #GP(return selector)
  if (cs_selector.rpl < CPL) {
    BX_ERROR(("iret64: return selector RPL < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc);
  }

  // check code-segment descriptor
  check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);

  /* INTERRUPT RETURN TO SAME PRIVILEGE LEVEL */
  if (cs_selector.rpl == CPL && !i->os64L())
  {
    BX_DEBUG(("LONG MODE INTERRUPT RETURN TO SAME PRIVILEGE LEVEL"));

    /* top 24 bytes on stack must be within limits, else #SS(0) */
    /* satisfied above */

#if BX_SUPPORT_CET
    if (ShadowStackEnabled(CPL)) {
      bx_address prev_SSP = SSP;
      SSP = shadow_stack_restore(raw_cs_selector, cs_descriptor, new_rip);
      if (SSP != prev_SSP) {
        shadow_stack_atomic_clear_busy(SSP, CPL);
      }
    }
#endif

    /* load CS:EIP from stack */
    /* load CS-cache with new code segment descriptor */
    branch_far(&cs_selector, &cs_descriptor, new_rip, CPL);

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsDFMask |
                        EFlagsNTMask | EFlagsRFMask | EFlagsIDMask | EFlagsACMask;
    if (CPL <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (CPL == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

    // IF only changed if (CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if CPL == 0
    // VM unaffected
    writeEFlags(new_eflags, changeMask);

    /* we are NOT in 64-bit mode */
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += top_nbytes_same;
    else
      SP += top_nbytes_same;
  }
  else {

    BX_DEBUG(("LONG MODE INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL or 64 BIT MODE"));

    /* 64bit opsize
     * ============
     * SS     eSP+32
     * ESP    eSP+24
     * EFLAGS eSP+16
     * CS     eSP+8
     * EIP    eSP+0
     */

    /* examine return SS selector and associated descriptor */
#if BX_SUPPORT_X86_64
    if (i->os64L()) {
      raw_ss_selector = (Bit16u) stack_read_qword(temp_RSP + 32);
      new_rsp         =          stack_read_qword(temp_RSP + 24);
    }
    else
#endif
    {
      if (i->os32L()) {
        raw_ss_selector = (Bit16u) stack_read_dword(temp_RSP + 16);
        new_rsp         = (Bit64u) stack_read_dword(temp_RSP + 12);
      }
      else {
        raw_ss_selector =          stack_read_word(temp_RSP + 8);
        new_rsp         = (Bit64u) stack_read_word(temp_RSP + 6);
      }
    }

    if ((raw_ss_selector & 0xfffc) == 0) {
      if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_selector.rpl == 3) {
        BX_ERROR(("iret64: SS selector null"));
        exception(BX_GP_EXCEPTION, 0);
      }
    }
    else {
      parse_selector(raw_ss_selector, &ss_selector);

      /* selector RPL must = RPL of return CS selector,
       * else #GP(SS selector) */
      if (ss_selector.rpl != cs_selector.rpl) {
        BX_ERROR(("iret64: SS.rpl != CS.rpl"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
      }

      /* selector index must be within its descriptor table limits,
       * else #GP(SS selector) */
      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
      parse_descriptor(dword1, dword2, &ss_descriptor);

      /* AR byte must indicate a writable data segment,
       * else #GP(SS selector) */
      if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
           IS_CODE_SEGMENT(ss_descriptor.type) ||
          !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
      {
        BX_ERROR(("iret64: SS AR byte not writable or code segment"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
      }

      /* stack segment DPL must equal the RPL of the return CS selector,
       * else #GP(SS selector) */
      if (ss_descriptor.dpl != cs_selector.rpl) {
        BX_ERROR(("iret64: SS.dpl != CS selector RPL"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
      }

      /* SS must be present, else #NP(SS selector) */
      if (! IS_PRESENT(ss_descriptor)) {
        BX_ERROR(("iret64: SS not present!"));
        exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc);
      }
    }

    Bit8u prev_cpl = CPL; /* previous CPL */

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsDFMask |
                        EFlagsNTMask | EFlagsRFMask | EFlagsIDMask | EFlagsACMask;
    if (prev_cpl <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (prev_cpl == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

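    // CET: when returning to ring 3 the new SSP comes from the
    // IA32_PL3_SSP MSR; otherwise it is restored from the records on
    // the current shadow stack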
#if BX_SUPPORT_CET
    bx_address new_SSP = BX_CPU_THIS_PTR msr.ia32_pl_ssp[3];
    if (ShadowStackEnabled(CPL)) {
      if (SSP & 0x7) {
        BX_ERROR(("iret64: SSP is not 8-byte aligned"));
        exception(BX_CP_EXCEPTION, BX_CP_FAR_RET_IRET);
      }
      if (cs_selector.rpl != 3) {
        new_SSP = shadow_stack_restore(raw_cs_selector, cs_descriptor, new_rip);
      }
    }
#endif

    /* set CPL to the RPL of the return CS selector */
    branch_far(&cs_selector, &cs_descriptor, new_rip, cs_selector.rpl);

    // IF only changed if (prev_CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if prev_CPL == 0
    // VM unaffected
    writeEFlags(new_eflags, changeMask);

    if ((raw_ss_selector & 0xfffc) != 0) {
      // load SS:RSP from stack
      // load the SS-cache with SS descriptor
      load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    }
    else {
      // null SS: only legal when returning to 64-bit mode
      load_null_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], raw_ss_selector);
    }

    if (long64_mode()) RSP = new_rsp;
    else {
      if (ss_descriptor.u.segment.d_b) ESP = (Bit32u) new_rsp;
      else SP = (Bit16u) new_rsp;
    }

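    // CET: switch SSP to the new privilege level's shadow stack, then
    // clear the busy token on the previous level's shadow stack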
#if BX_SUPPORT_CET
    bx_address old_SSP = SSP;
    if (ShadowStackEnabled(CPL)) {
      if (!long64_mode() && GET32H(new_SSP) != 0) {
        BX_ERROR(("iret64: 64-bit SSP in legacy mode"));
        exception(BX_GP_EXCEPTION, 0);
      }
      SSP = new_SSP;
    }
    if (ShadowStackEnabled(prev_cpl)) {
      shadow_stack_atomic_clear_busy(old_SSP, prev_cpl);
    }
#endif

    if (prev_cpl != CPL) validate_seg_regs();
  }
}
#endif