/////////////////////////////////////////////////////////////////////////
// $Id: proc_ctrl.cc 14095 2021-01-30 18:47:25Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001-2019  The Bochs Project
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "pc_system.h"
#include "gui/gui.h"

#include "wide_int.h"
#include "decoder/ia_opcodes.h"

void BX_CPP_AttrRegparmN(1) BX_CPU_C::BxError(bxInstruction_c *i)
{
  unsigned ia_opcode = i->getIaOpcode();

  if (ia_opcode == BX_IA_ERROR) {
    BX_DEBUG(("BxError: Encountered an unknown instruction (signalling #UD)"));

#if BX_DEBUGGER == 0 // with the debugger it is easy to see the #UD
    if (LOG_THIS getonoff(LOGLEV_DEBUG))
      debug_disasm_instruction(BX_CPU_THIS_PTR prev_rip);
#endif
  }
  else {
    BX_DEBUG(("%s: instruction not supported - signalling #UD", get_bx_opcode_name(ia_opcode)));
    for (unsigned n=0; n<BX_ISA_EXTENSIONS_ARRAY_SIZE; n++)
      BX_DEBUG(("ia_extensions_bitmask[%d]: %08x", n, BX_CPU_THIS_PTR ia_extensions_bitmask[n]));
  }

  exception(BX_UD_EXCEPTION, 0);

  BX_NEXT_TRACE(i); // keep compiler happy
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::UndefinedOpcode(bxInstruction_c *i)
{
  BX_DEBUG(("UndefinedOpcode: generate #UD exception"));
  exception(BX_UD_EXCEPTION, 0);

  BX_NEXT_TRACE(i); // keep compiler happy
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::NOP(bxInstruction_c *i)
{
  // No operation.

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::PAUSE(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_PAUSE();
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_PAUSE)) SvmInterceptPAUSE();
  }
#endif

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::PREFETCH(bxInstruction_c *i)
{
#if BX_INSTRUMENTATION
  BX_INSTR_PREFETCH_HINT(BX_CPU_ID, i->src(), i->seg(), BX_CPU_RESOLVE_ADDR(i));
#endif

  BX_NEXT_INSTR(i);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CPUID(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 4

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMexit(VMX_VMEXIT_CPUID, 0);
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_CPUID)) Svm_Vmexit(SVM_VMEXIT_CPUID);
  }
#endif

  struct cpuid_function_t leaf;
  BX_CPU_THIS_PTR cpuid->get_cpuid_leaf(EAX, ECX, &leaf);

  RAX = leaf.eax;
  RBX = leaf.ebx;
  RCX = leaf.ecx;
  RDX = leaf.edx;
#endif

  BX_NEXT_INSTR(i);
}

//
// The shutdown state is very similar to the state following execution
// of the HLT instruction. In this state the processor stops executing
// instructions until #NMI, #SMI, #RESET or #INIT is received. If
// shutdown occurs while in the NMI interrupt handler or in SMM, a hardware
// reset must be used to restart processor execution.
//
void BX_CPU_C::shutdown(void)
{
#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_SHUTDOWN)) Svm_Vmexit(SVM_VMEXIT_SHUTDOWN);
  }
#endif

  enter_sleep_state(BX_ACTIVITY_STATE_SHUTDOWN);

  longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
}

void BX_CPU_C::enter_sleep_state(unsigned state)
{
  switch(state) {
  case BX_ACTIVITY_STATE_ACTIVE:
    BX_ASSERT(0); // should not be used for entering active CPU state
    break;

  case BX_ACTIVITY_STATE_HLT:
    break;

  case BX_ACTIVITY_STATE_WAIT_FOR_SIPI:
    mask_event(BX_EVENT_INIT | BX_EVENT_SMI | BX_EVENT_NMI); // FIXME: all events should be masked
    // fall through - mask interrupts as well

  case BX_ACTIVITY_STATE_SHUTDOWN:
    BX_CPU_THIS_PTR clear_IF(); // masking interrupts
    break;

  case BX_ACTIVITY_STATE_MWAIT:
  case BX_ACTIVITY_STATE_MWAIT_IF:
    break;

  default:
    BX_PANIC(("enter_sleep_state: unknown state %d", state));
  }

  // artificial trap bit, why use another variable.
  BX_CPU_THIS_PTR activity_state = state;
  BX_CPU_THIS_PTR async_event = 1; // so processor knows to check
  // Execution completes.  The processor will remain in a sleep
  // state until one of the wakeup conditions is met.

  BX_INSTR_HLT(BX_CPU_ID);

#if BX_DEBUGGER
  bx_dbg_halt(BX_CPU_ID);
#endif

#if BX_USE_IDLE_HACK
  bx_gui->sim_is_idle();
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::HLT(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_DEBUG(("HLT: %s privilege check failed, CPL=%d, generate #GP(0)",
        cpu_mode_string(BX_CPU_THIS_PTR cpu_mode), CPL));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (! BX_CPU_THIS_PTR get_IF()) {
    BX_INFO(("WARNING: HLT instruction with IF=0!"));
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_HLT_VMEXIT)) {
      VMexit(VMX_VMEXIT_HLT, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_HLT)) Svm_Vmexit(SVM_VMEXIT_HLT);
  }
#endif

  // stops instruction execution and places the processor in a
  // HALT state. An enabled interrupt, NMI, or reset will resume
  // execution. If an interrupt (including NMI) is used to resume
  // execution after HLT, the saved CS:eIP points to the instruction
  // following HLT.
  enter_sleep_state(BX_ACTIVITY_STATE_HLT);

  BX_NEXT_TRACE(i);
}

/* 0F 08 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::INVD(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("%s: privilege check failed, generate #GP(0)", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMexit(VMX_VMEXIT_INVD, 0);
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_INVD)) Svm_Vmexit(SVM_VMEXIT_INVD);
  }
#endif

  invalidate_prefetch_q();

  BX_DEBUG(("INVD: Flush internal caches !"));
  BX_INSTR_CACHE_CNTRL(BX_CPU_ID, BX_INSTR_INVD);

  flushICaches();

  BX_NEXT_TRACE(i);
}

/* 0F 09 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::WBINVD(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("%s: privilege check failed, generate #GP(0)", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_WBINVD_VMEXIT)) {
      VMexit(VMX_VMEXIT_WBINVD, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT1_WBINVD)) Svm_Vmexit(SVM_VMEXIT_WBINVD);
  }
#endif

//invalidate_prefetch_q();

  BX_DEBUG(("WBINVD: WB-Invalidate internal caches !"));
  BX_INSTR_CACHE_CNTRL(BX_CPU_ID, BX_INSTR_WBINVD);

//flushICaches();

  BX_NEXT_TRACE(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i)
{
  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr;

  // CLFLUSH performs all the segmentation and paging checks that a 1-byte read would perform,
  // except that it also allows references to execute-only segments.
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
    laddr = get_laddr64(i->seg(), eaddr);
  else
#endif
    laddr = agen_read_execute32(i->seg(), (Bit32u)eaddr, 1);

  tickle_read_linear(i->seg(), laddr);

  BX_INSTR_CLFLUSH(BX_CPU_ID, laddr, BX_CPU_THIS_PTR address_xlation.paddress1);

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLZERO(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  bx_address eaddr = RAX & ~BX_CONST64(CACHE_LINE_SIZE-1) & i->asize_mask();
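  // eaddr is RAX aligned down to the start of its cache line (CACHE_LINE_SIZE
  // is assumed to be a power of two); the loop below then zeroes the whole
  // line in 64-byte (ZMM-sized) chunks.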

  BxPackedZmmRegister zmmzero; // zmm is always made available even if EVEX is not compiled in
  zmmzero.clear();
  for (unsigned n=0; n<CACHE_LINE_SIZE; n += 64) {
    write_virtual_zmmword(i->seg(), eaddr+n, &zmmzero);
  }
#endif

  BX_NEXT_INSTR(i);
}

void BX_CPU_C::handleCpuModeChange(void)
{
  unsigned mode = BX_CPU_THIS_PTR cpu_mode;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR efer.get_LMA()) {
    if (! BX_CPU_THIS_PTR cr0.get_PE()) {
      BX_PANIC(("change_cpu_mode: EFER.LMA is set when CR0.PE=0 !"));
    }
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) {
      BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_64;
    }
    else {
      BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_COMPAT;
      // clear upper part of RIP/RSP when leaving 64-bit long mode
      BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP);
      BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSP);
    }

    // switching between compatibility and long64 mode also affects SS.BASE,
    // which is always zero in long64 mode
    invalidate_stack_cache();
  }
  else
#endif
  {
    if (BX_CPU_THIS_PTR cr0.get_PE()) {
      if (BX_CPU_THIS_PTR get_VM()) {
        BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_V8086;
        CPL = 3;
      }
      else
        BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_PROTECTED;
    }
    else {
      BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_REAL;

      // CS segment in real mode always allows full access
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p        = 1;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment  = 1;  /* data/code segment */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;

      CPL = 0;
    }
  }

  updateFetchModeMask();

#if BX_CPU_LEVEL >= 6
#if BX_SUPPORT_AVX
  handleAvxModeChange(); /* protected mode reloaded */
#endif
#endif

  // re-initialize protection keys
#if BX_SUPPORT_PKEYS
  set_PKeys(BX_CPU_THIS_PTR pkru, BX_CPU_THIS_PTR pkrs);
#endif

  if (mode != BX_CPU_THIS_PTR cpu_mode) {
    BX_DEBUG(("%s activated", cpu_mode_string(BX_CPU_THIS_PTR cpu_mode)));
#if BX_DEBUGGER
    if (BX_CPU_THIS_PTR mode_break) {
      BX_CPU_THIS_PTR stop_reason = STOP_MODE_BREAK_POINT;
      bx_debug_break(); // trap into debugger
    }
#endif
  }
}

#if BX_CPU_LEVEL >= 4
void BX_CPU_C::handleAlignmentCheck(void)
{
  if (CPL == 3 && BX_CPU_THIS_PTR cr0.get_AM() && BX_CPU_THIS_PTR get_AC()) {
#if BX_SUPPORT_ALIGNMENT_CHECK == 0
    BX_PANIC(("WARNING: Alignment check (#AC exception) was not compiled in !"));
#else
    BX_CPU_THIS_PTR alignment_check_mask = 0xF;
#endif
  }
#if BX_SUPPORT_ALIGNMENT_CHECK
  else {
    BX_CPU_THIS_PTR alignment_check_mask = 0;
  }
#endif
}
#endif

#if BX_CPU_LEVEL >= 6
void BX_CPU_C::handleSseModeChange(void)
{
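  // SSE instructions are usable only when CR0.TS=0, CR0.EM=0 and
  // CR4.OSFXSR=1; otherwise they fault with #NM (TS set) or #UD
  // (EM set or OSFXSR clear) through the BxNoSSE stub below.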
  if(BX_CPU_THIS_PTR cr0.get_TS()) {
    BX_CPU_THIS_PTR sse_ok = 0;
  }
  else {
    if(BX_CPU_THIS_PTR cr0.get_EM() || !BX_CPU_THIS_PTR cr4.get_OSFXSR())
      BX_CPU_THIS_PTR sse_ok = 0;
    else
      BX_CPU_THIS_PTR sse_ok = 1;
  }

  updateFetchModeMask(); /* SSE_OK changed */
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::BxNoSSE(bxInstruction_c *i)
{
  if(BX_CPU_THIS_PTR cr0.get_EM() || !BX_CPU_THIS_PTR cr4.get_OSFXSR())
    exception(BX_UD_EXCEPTION, 0);

  if(BX_CPU_THIS_PTR cr0.get_TS())
    exception(BX_NM_EXCEPTION, 0);

  BX_ASSERT(0);

  BX_NEXT_TRACE(i); // keep compiler happy
}

#if BX_SUPPORT_AVX
void BX_CPU_C::handleAvxModeChange(void)
{
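  // AVX is usable only in protected mode with CR4.OSXSAVE=1 and both the
  // SSE and YMM bits set in XCR0; with CR0.TS=1 execution faults with
  // #NM through the BxNoAVX stub below.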
  if(BX_CPU_THIS_PTR cr0.get_TS()) {
    BX_CPU_THIS_PTR avx_ok = 0;
  }
  else {
    if (! protected_mode() || ! BX_CPU_THIS_PTR cr4.get_OSXSAVE() ||
        (~BX_CPU_THIS_PTR xcr0.val32 & (BX_XCR0_SSE_MASK | BX_XCR0_YMM_MASK)) != 0) {
      BX_CPU_THIS_PTR avx_ok = 0;
    }
    else {
      BX_CPU_THIS_PTR avx_ok = 1;

#if BX_SUPPORT_EVEX
      if ((~BX_CPU_THIS_PTR xcr0.val32 & BX_XCR0_OPMASK_MASK) != 0) {
        BX_CPU_THIS_PTR opmask_ok = BX_CPU_THIS_PTR evex_ok = 0;
      }
      else {
        BX_CPU_THIS_PTR opmask_ok = 1;

        if ((~BX_CPU_THIS_PTR xcr0.val32 & (BX_XCR0_ZMM_HI256_MASK | BX_XCR0_HI_ZMM_MASK)) != 0)
          BX_CPU_THIS_PTR evex_ok = 0;
        else
          BX_CPU_THIS_PTR evex_ok = 1;
      }
#endif
    }
  }

#if BX_SUPPORT_EVEX
  if (! BX_CPU_THIS_PTR avx_ok)
    BX_CPU_THIS_PTR opmask_ok = BX_CPU_THIS_PTR evex_ok = 0;
#endif

  updateFetchModeMask(); /* AVX_OK changed */
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::BxNoAVX(bxInstruction_c *i)
{
  if (! protected_mode() || ! BX_CPU_THIS_PTR cr4.get_OSXSAVE())
    exception(BX_UD_EXCEPTION, 0);

  if (~BX_CPU_THIS_PTR xcr0.val32 & (BX_XCR0_SSE_MASK | BX_XCR0_YMM_MASK))
    exception(BX_UD_EXCEPTION, 0);

  if(BX_CPU_THIS_PTR cr0.get_TS())
    exception(BX_NM_EXCEPTION, 0);

  BX_ASSERT(0);

  BX_NEXT_TRACE(i); // keep compiler happy
}
#endif

#if BX_SUPPORT_EVEX
void BX_CPP_AttrRegparmN(1) BX_CPU_C::BxNoOpMask(bxInstruction_c *i)
{
  if (! protected_mode() || ! BX_CPU_THIS_PTR cr4.get_OSXSAVE())
    exception(BX_UD_EXCEPTION, 0);

  if (~BX_CPU_THIS_PTR xcr0.val32 & (BX_XCR0_SSE_MASK | BX_XCR0_YMM_MASK | BX_XCR0_OPMASK_MASK))
    exception(BX_UD_EXCEPTION, 0);

  if(BX_CPU_THIS_PTR cr0.get_TS())
    exception(BX_NM_EXCEPTION, 0);

  BX_ASSERT(0);

  BX_NEXT_TRACE(i); // keep compiler happy
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::BxNoEVEX(bxInstruction_c *i)
{
  if (! protected_mode() || ! BX_CPU_THIS_PTR cr4.get_OSXSAVE())
    exception(BX_UD_EXCEPTION, 0);

  if (~BX_CPU_THIS_PTR xcr0.val32 & (BX_XCR0_SSE_MASK | BX_XCR0_YMM_MASK | BX_XCR0_OPMASK_MASK | BX_XCR0_ZMM_HI256_MASK | BX_XCR0_HI_ZMM_MASK))
    exception(BX_UD_EXCEPTION, 0);

  if(BX_CPU_THIS_PTR cr0.get_TS())
    exception(BX_NM_EXCEPTION, 0);

  BX_ASSERT(0);

  BX_NEXT_TRACE(i); // keep compiler happy
}
#endif

#endif

void BX_CPU_C::handleCpuContextChange(void)
{
  TLB_flush();

  invalidate_prefetch_q();
  invalidate_stack_cache();

  handleInterruptMaskChange();

#if BX_CPU_LEVEL >= 4
  handleAlignmentCheck();
#endif

  handleCpuModeChange();

#if BX_CPU_LEVEL >= 6
  handleSseModeChange();
#if BX_SUPPORT_AVX
  handleAvxModeChange();
#endif
#endif
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDPMC(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  // in real mode CPL=0
  if (! BX_CPU_THIS_PTR cr4.get_PCE() && CPL != 0 /* && protected_mode() */) {
    BX_ERROR(("%s: not allowed to use instruction !", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_RDPMC_VMEXIT)) {
      VMexit(VMX_VMEXIT_RDPMC, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_RDPMC)) Svm_Vmexit(SVM_VMEXIT_RDPMC);
  }
#endif

  /* According to the manual, the Pentium 4 has 18 counters while
   * earlier models have two.  The P4 can also do a short read-out
   * (EDX always 0).  Otherwise the result is limited to 40 bits.
   */

  if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SSE2)) { // Pentium 4 processor (see cpuid.cc)
    if ((ECX & 0x7fffffff) >= 18)
      exception(BX_GP_EXCEPTION, 0);
  }
  else {
    if ((ECX & 0xffffffff) >= 2)
      exception(BX_GP_EXCEPTION, 0);
  }

  // Most counters are for hardware specific details, which
  // we anyhow do not emulate (like pipeline stalls etc)

  // Could be interesting to count the number of memory reads and
  // writes, misaligned accesses etc.  But to monitor bochs, this
  // is easier done from the host.

  RAX = 0;
  RDX = 0; // if P4 and ECX & 0x10000000, then always 0 (short read 32 bits)

  BX_ERROR(("RDPMC: Performance Counters Support not implemented yet"));
#endif

  BX_NEXT_INSTR(i);
}


#if BX_CPU_LEVEL >= 5

Bit64u BX_CPU_C::get_TSC(void)
{
  Bit64u tsc = bx_pc_system.time_ticks() + BX_CPU_THIS_PTR tsc_adjust;
  return tsc;
}

#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
Bit64u BX_CPU_C::get_TSC_VMXAdjust(Bit64u tsc)
{
#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_TSC_OFFSET) && SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_TSC_SCALING)) {
      Bit128u product_128;
      long_mul(&product_128,tsc,BX_CPU_THIS_PTR vmcs.tsc_multiplier);
      tsc = (product_128.lo >> 48) | (product_128.hi << 16);   // tsc = (uint64) (long128(tsc_value * tsc_multiplier) >> 48);
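      // tsc_multiplier acts as a 16.48 fixed-point ratio, so the 128-bit
      // product shifted right by 48 rescales the TSC: a multiplier of
      // 1ull << 48 leaves the rate unchanged, 1ull << 47 halves it.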
    }
  }
#endif
  tsc += BX_CPU_THIS_PTR tsc_offset;    // BX_CPU_THIS_PTR tsc_offset = 0 if not in VMX or SVM guest
  return tsc;
}
#endif

void BX_CPU_C::set_TSC(Bit64u newval)
{
  // compute the correct setting of tsc_adjust so that a get_TSC()
  // will return newval
  BX_CPU_THIS_PTR tsc_adjust = newval - bx_pc_system.time_ticks();

  // verify
  BX_ASSERT(get_TSC() == newval);
}
#endif

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDTSC(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR cr4.get_TSD() && CPL != 0) {
    BX_ERROR(("%s: not allowed to use instruction !", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_RDTSC_VMEXIT)) {
      VMexit(VMX_VMEXIT_RDTSC, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest)
    if (SVM_INTERCEPT(SVM_INTERCEPT0_RDTSC)) Svm_Vmexit(SVM_VMEXIT_RDTSC);
#endif

  // return ticks
  Bit64u ticks = BX_CPU_THIS_PTR get_TSC();
#if BX_SUPPORT_SVM || BX_SUPPORT_VMX
  ticks = BX_CPU_THIS_PTR get_TSC_VMXAdjust(ticks);
#endif

  RAX = GET32L(ticks);
  RDX = GET32H(ticks);

  BX_DEBUG(("RDTSC: ticks 0x%08x:%08x", EDX, EAX));
#endif

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDTSCP(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64

#if BX_SUPPORT_VMX
  // RDTSCP will always #UD in legacy VMX mode; the #UD takes priority over any other exception the instruction may incur.
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_RDTSCP)) {
       BX_ERROR(("%s in VMX guest: not allowed to use instruction !", i->getIaOpcodeNameShort()));
       exception(BX_UD_EXCEPTION, 0);
    }
  }
#endif

  if (BX_CPU_THIS_PTR cr4.get_TSD() && CPL != 0) {
    BX_ERROR(("%s: not allowed to use instruction !", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_RDTSC_VMEXIT)) {
      VMexit(VMX_VMEXIT_RDTSCP, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest)
    if (SVM_INTERCEPT(SVM_INTERCEPT1_RDTSCP)) Svm_Vmexit(SVM_VMEXIT_RDTSCP);
#endif

  // return ticks
  Bit64u ticks = BX_CPU_THIS_PTR get_TSC();
#if BX_SUPPORT_SVM || BX_SUPPORT_VMX
  ticks = BX_CPU_THIS_PTR get_TSC_VMXAdjust(ticks);
#endif

  RAX = GET32L(ticks);
  RDX = GET32H(ticks);
  RCX = BX_CPU_THIS_PTR msr.tsc_aux;

#endif

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDPID_Ed(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64

#if BX_SUPPORT_VMX
  // RDPID (like RDTSCP) will always #UD in legacy VMX mode
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_RDTSCP)) {
       BX_ERROR(("%s in VMX guest: not allowed to use instruction !", i->getIaOpcodeNameShort()));
       exception(BX_UD_EXCEPTION, 0);
    }
  }
#endif

  BX_WRITE_32BIT_REGZ(i->dst(), BX_CPU_THIS_PTR msr.tsc_aux);
#endif

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSENTER(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  if (real_mode()) {
    BX_ERROR(("%s: not recognized in real mode !", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }
  if ((BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK) == 0) {
    BX_ERROR(("SYSENTER with zero sysenter_cs_msr !"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  BX_INSTR_FAR_BRANCH_ORIGIN();

  BX_CPU_THIS_PTR clear_VM();       // do this just like the book says to do
  BX_CPU_THIS_PTR clear_IF();
  BX_CPU_THIS_PTR clear_RF();

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    if (!IsCanonical(BX_CPU_THIS_PTR msr.sysenter_eip_msr)) {
      BX_ERROR(("SYSENTER with non-canonical SYSENTER_EIP_MSR !"));
      exception(BX_GP_EXCEPTION, 0);
    }
    if (!IsCanonical(BX_CPU_THIS_PTR msr.sysenter_esp_msr)) {
      BX_ERROR(("SYSENTER with non-canonical SYSENTER_ESP_MSR !"));
      exception(BX_GP_EXCEPTION, 0);
    }
  }
#endif

#if BX_SUPPORT_CET
  if (ShadowStackEnabled(CPL))
    BX_CPU_THIS_PTR msr.ia32_pl_ssp[3] = SSP;
  if (ShadowStackEnabled(0)) SSP = 0;
  track_indirect(0);
#endif

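  // The flat ring-0 CS selector is taken from IA32_SYSENTER_CS; the SS
  // selector is architecturally that value + 8 (loaded further below).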
  parse_selector(BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0;          // base address
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1;          // 4k granularity
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0;          // available for use by system
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = !long_mode();
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            =  long_mode();
#endif

#if BX_SUPPORT_X86_64
  handleCpuModeChange(); // mode change could happen only when in long_mode()
#else
  updateFetchModeMask(/* CS reloaded */);
#endif

#if BX_SUPPORT_ALIGNMENT_CHECK
  BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
#endif

  parse_selector((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 8) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid    = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p        = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl      = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment  = 1; /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type     = BX_DATA_READ_WRITE_ACCESSED;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base         = 0;          // base address
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g            = 1;          // 4k granularity
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b          = 1;          // 32-bit mode
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl          = 0;          // available for use by system
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l            = 0;
#endif

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    RSP = BX_CPU_THIS_PTR msr.sysenter_esp_msr;
    RIP = BX_CPU_THIS_PTR msr.sysenter_eip_msr;
  }
  else
#endif
  {
    ESP = (Bit32u) BX_CPU_THIS_PTR msr.sysenter_esp_msr;
    EIP = (Bit32u) BX_CPU_THIS_PTR msr.sysenter_eip_msr;
  }

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSENTER,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
#endif

  BX_NEXT_TRACE(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSEXIT(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  if (real_mode() || CPL != 0) {
    BX_ERROR(("SYSEXIT from real mode or with CPL<>0 !"));
    exception(BX_GP_EXCEPTION, 0);
  }
  if ((BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK) == 0) {
    BX_ERROR(("SYSEXIT with zero sysenter_cs_msr !"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  BX_INSTR_FAR_BRANCH_ORIGIN();

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    if (!IsCanonical(RDX)) {
       BX_ERROR(("SYSEXIT with non-canonical RDX (RIP) pointer !"));
       exception(BX_GP_EXCEPTION, 0);
    }
    if (!IsCanonical(RCX)) {
       BX_ERROR(("SYSEXIT with non-canonical RCX (RSP) pointer !"));
       exception(BX_GP_EXCEPTION, 0);
    }

    parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 32) & BX_SELECTOR_RPL_MASK) | 3,
            &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0;           // base address
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  // scaled segment limit
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1;           // 4k granularity
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0;           // available for use by system
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 1;

    RSP = RCX;
    RIP = RDX;
  }
  else
#endif
  {
    parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 16) & BX_SELECTOR_RPL_MASK) | 3,
            &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0;           // base address
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  // scaled segment limit
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1;           // 4k granularity
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0;           // available for use by system
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 1;
#if BX_SUPPORT_X86_64
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 0;
#endif

    ESP = ECX;
    EIP = EDX;
  }

#if BX_SUPPORT_X86_64
  handleCpuModeChange(); // mode change could happen only when in long_mode()
#else
  updateFetchModeMask(/* CS reloaded */);
#endif

  handleAlignmentCheck(/* CPL change */);

  parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + (i->os64L() ? 40:24)) & BX_SELECTOR_RPL_MASK) | 3,
            &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid    = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p        = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl      = 3;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment  = 1; /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type     = BX_DATA_READ_WRITE_ACCESSED;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base         = 0;           // base address
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  // scaled segment limit
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g            = 1;           // 4k granularity
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b          = 1;           // 32-bit mode
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl          = 0;           // available for use by system
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l            = 0;
#endif

#if BX_SUPPORT_CET
  if (ShadowStackEnabled(CPL))
    SSP = BX_CPU_THIS_PTR msr.ia32_pl_ssp[3];
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSEXIT,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
#endif

  BX_NEXT_TRACE(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSCALL(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  bx_address temp_RIP;

  BX_DEBUG(("Execute SYSCALL instruction"));

  if (!BX_CPU_THIS_PTR efer.get_SCE()) {
    exception(BX_UD_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  BX_INSTR_FAR_BRANCH_ORIGIN();

#if BX_SUPPORT_CET
  unsigned old_CPL = CPL;
#endif
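
  // MSR_STAR layout: bits 47:32 supply the SYSCALL CS selector base
  // (SS is implicitly that selector + 8); bits 63:48 hold the selector
  // base used by SYSRET on the return path.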

#if BX_SUPPORT_X86_64
  if (long_mode())
  {
    RCX = RIP;
    R11 = read_eflags() & ~(EFlagsRFMask);

    if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
      temp_RIP = BX_CPU_THIS_PTR msr.lstar;
    }
    else {
      temp_RIP = BX_CPU_THIS_PTR msr.cstar;
    }

    // set up CS segment, flat, 64-bit DPL=0
    parse_selector((BX_CPU_THIS_PTR msr.star >> 32) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 1; /* 64-bit code */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

    handleCpuModeChange(); // mode change could only happen when in long_mode()

#if BX_SUPPORT_ALIGNMENT_CHECK
    BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
#endif

    // set up SS segment, flat, 64-bit DPL=0
    parse_selector(((BX_CPU_THIS_PTR msr.star >> 32) + 8) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl     = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type    = BX_DATA_READ_WRITE_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b          = 1; /* 32 bit stack */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l            = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl          = 0; /* available for use by system */

    writeEFlags(read_eflags() & ~(BX_CPU_THIS_PTR msr.fmask) & ~(EFlagsRFMask), EFlagsValidMask);
    RIP = temp_RIP;
  }
  else
#endif
  {
    // legacy mode

    ECX = EIP;
    temp_RIP = (Bit32u)(BX_CPU_THIS_PTR msr.star);

    // set up CS segment, flat, 32-bit DPL=0
    parse_selector((BX_CPU_THIS_PTR msr.star >> 32) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 1;
#if BX_SUPPORT_X86_64
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 0; /* 32-bit code */
#endif
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

    updateFetchModeMask(/* CS reloaded */);

#if BX_SUPPORT_ALIGNMENT_CHECK
    BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
#endif

    // set up SS segment, flat, 32-bit DPL=0
    parse_selector(((BX_CPU_THIS_PTR msr.star >> 32) + 8) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl     = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type    = BX_DATA_READ_WRITE_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b          = 1; /* 32 bit stack */
#if BX_SUPPORT_X86_64
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l            = 0;
#endif
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl          = 0; /* available for use by system */

    BX_CPU_THIS_PTR clear_VM();
    BX_CPU_THIS_PTR clear_IF();
    BX_CPU_THIS_PTR clear_RF();
    RIP = temp_RIP;
  }

#if BX_SUPPORT_CET
  if (ShadowStackEnabled(old_CPL))
    BX_CPU_THIS_PTR msr.ia32_pl_ssp[3] = SSP;
  if (ShadowStackEnabled(0)) SSP = 0;
  track_indirect(0);
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSCALL,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
#endif

  BX_NEXT_TRACE(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSRET(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  bx_address temp_RIP;

  BX_DEBUG(("Execute SYSRET instruction"));

  if (!BX_CPU_THIS_PTR efer.get_SCE()) {
    exception(BX_UD_EXCEPTION, 0);
  }

  if(!protected_mode() || CPL != 0) {
    BX_ERROR(("%s: privilege check failed, generate #GP(0)", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  BX_INSTR_FAR_BRANCH_ORIGIN();
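
  // SYSRET selectors derive from MSR_STAR bits 63:48: a 64-bit return
  // loads CS from base+16, a 32-bit return from the base itself, and SS
  // is base+8 in both cases, with RPL forced to 3.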

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
  {
    if (i->os64L()) {
      if (!IsCanonical(RCX)) {
        BX_ERROR(("SYSRET: canonical failure for RCX (RIP)"));
        exception(BX_GP_EXCEPTION, 0);
      }

      // Return to 64-bit mode, set up CS segment, flat, 64-bit DPL=3
      parse_selector((((BX_CPU_THIS_PTR msr.star >> 48) + 16) & BX_SELECTOR_RPL_MASK) | 3,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 0;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 1; /* 64-bit code */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

      temp_RIP = RCX;
    }
    else {
      // Return to 32-bit compatibility mode, set up CS segment, flat, 32-bit DPL=3
      parse_selector((BX_CPU_THIS_PTR msr.star >> 48) | 3,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 1;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 0; /* 32-bit code */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

      temp_RIP = ECX;
    }

    handleCpuModeChange(); // mode change could only happen when in long64 mode

    handleAlignmentCheck(/* CPL change */);

    // SS base, limit, attributes unchanged
    parse_selector((Bit16u)(((BX_CPU_THIS_PTR msr.star >> 48) + 8) | 3),
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type    = BX_DATA_READ_WRITE_ACCESSED;

    writeEFlags((Bit32u) R11, EFlagsValidMask);
  }
  else // (!64BIT_MODE)
#endif
  {
    // Return to 32-bit legacy mode, set up CS segment, flat, 32-bit DPL=3
    parse_selector((BX_CPU_THIS_PTR msr.star >> 48) | 3,
                     &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 1;
#if BX_SUPPORT_X86_64
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 0; /* 32-bit code */
#endif
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

    updateFetchModeMask(/* CS reloaded */);

    handleAlignmentCheck(/* CPL change */);

    // SS base, limit, attributes unchanged
    parse_selector((Bit16u)(((BX_CPU_THIS_PTR msr.star >> 48) + 8) | 3),
                     &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type    = BX_DATA_READ_WRITE_ACCESSED;

    BX_CPU_THIS_PTR assert_IF();
    temp_RIP = ECX;
  }

  handleCpuModeChange();

  RIP = temp_RIP;

#if BX_SUPPORT_CET
  if (ShadowStackEnabled(CPL))
    SSP = BX_CPU_THIS_PTR msr.ia32_pl_ssp[3];
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSRET,
                      FAR_BRANCH_PREV_CS, FAR_BRANCH_PREV_RIP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
#endif

  BX_NEXT_TRACE(i);
}

#if BX_SUPPORT_X86_64

void BX_CPP_AttrRegparmN(1) BX_CPU_C::SWAPGS(bxInstruction_c *i)
{
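  // SWAPGS exchanges the current GS base with IA32_KERNEL_GS_BASE so a
  // kernel entry point can reach its per-CPU data; the instruction is
  // privileged and exists only in 64-bit mode.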
  if(CPL != 0)
    exception(BX_GP_EXCEPTION, 0);

  Bit64u temp_GS_base = MSR_GSBASE;
  MSR_GSBASE = BX_CPU_THIS_PTR msr.kernelgsbase;
  BX_CPU_THIS_PTR msr.kernelgsbase = temp_GS_base;

  BX_NEXT_INSTR(i);
}

/* F3 0F AE /0 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDFSBASE_Ed(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  BX_WRITE_32BIT_REGZ(i->dst(), (Bit32u) MSR_FSBASE);
  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDFSBASE_Eq(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  BX_WRITE_64BIT_REG(i->dst(), MSR_FSBASE);
  BX_NEXT_INSTR(i);
}

/* F3 0F AE /1 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDGSBASE_Ed(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  BX_WRITE_32BIT_REGZ(i->dst(), (Bit32u) MSR_GSBASE);
  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDGSBASE_Eq(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  BX_WRITE_64BIT_REG(i->dst(), MSR_GSBASE);
  BX_NEXT_INSTR(i);
}

/* F3 0F AE /2 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRFSBASE_Ed(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  // 32-bit value is always canonical
  MSR_FSBASE = BX_READ_32BIT_REG(i->src());

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRFSBASE_Eq(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  Bit64u fsbase = BX_READ_64BIT_REG(i->src());
  if (!IsCanonical(fsbase)) {
    BX_ERROR(("%s: canonical failure !", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }
  MSR_FSBASE = fsbase;

  BX_NEXT_INSTR(i);
}

/* F3 0F AE /3 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRGSBASE_Ed(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  // 32-bit value is always canonical
  MSR_GSBASE = BX_READ_32BIT_REG(i->src());

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRGSBASE_Eq(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  Bit64u gsbase = BX_READ_64BIT_REG(i->src());
  if (!IsCanonical(gsbase)) {
    BX_ERROR(("%s: canonical failure !", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }
  MSR_GSBASE = gsbase;

  BX_NEXT_INSTR(i);
}

#endif // BX_SUPPORT_X86_64

#if BX_SUPPORT_PKEYS

void BX_CPU_C::set_PKeys(Bit32u pkru_val, Bit32u pkrs_val)
{
  BX_CPU_THIS_PTR pkru = pkru_val;
  BX_CPU_THIS_PTR pkrs = pkrs_val;
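
  // PKRU/PKRS hold two bits per protection key: bit 2*i is the
  // access-disable (AD) bit and bit 2*i+1 the write-disable (WD) bit.
  // Example: pkru = 0x0C sets AD and WD for key 1, revoking user-mode
  // access to pages tagged with that key.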

  for (unsigned i=0; i<16; i++) {
    BX_CPU_THIS_PTR rd_pkey[i] = BX_CPU_THIS_PTR wr_pkey[i] =
      TLB_SysReadOK | TLB_UserReadOK | TLB_SysWriteOK | TLB_UserWriteOK;

    if (long_mode()) {
      if (BX_CPU_THIS_PTR cr4.get_PKE()) {
        // accessDisable bit set
        if (pkru_val & (1<<(i*2))) {
          BX_CPU_THIS_PTR rd_pkey[i] &= ~(TLB_UserReadOK | TLB_UserWriteOK);
          BX_CPU_THIS_PTR wr_pkey[i] &= ~(TLB_UserReadOK | TLB_UserWriteOK);
        }

        // writeDisable bit set
        if (pkru_val & (1<<(i*2+1))) {
          BX_CPU_THIS_PTR wr_pkey[i] &= ~(TLB_UserWriteOK);
          if (BX_CPU_THIS_PTR cr0.get_WP())
            BX_CPU_THIS_PTR wr_pkey[i] &= ~(TLB_SysWriteOK);
        }
      }

      if (BX_CPU_THIS_PTR cr4.get_PKS()) {
        // accessDisable bit set
        if (pkrs_val & (1<<(i*2))) {
          BX_CPU_THIS_PTR rd_pkey[i] &= ~(TLB_SysReadOK | TLB_SysWriteOK);
          BX_CPU_THIS_PTR wr_pkey[i] &= ~(TLB_SysReadOK | TLB_SysWriteOK);
        }

        // writeDisable bit set
        if (pkrs_val & (1<<(i*2+1))) {
          if (BX_CPU_THIS_PTR cr0.get_WP())
            BX_CPU_THIS_PTR wr_pkey[i] &= ~(TLB_SysWriteOK);
        }
      }
    }

#if BX_SUPPORT_CET
    // replicate pkey access bits for shadow stack checks
    BX_CPU_THIS_PTR rd_pkey[i] |= BX_CPU_THIS_PTR rd_pkey[i]<<4;
    BX_CPU_THIS_PTR wr_pkey[i] |= BX_CPU_THIS_PTR wr_pkey[i]<<4;
#endif
  }
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDPKRU(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_PKE())
    exception(BX_UD_EXCEPTION, 0);

  if (ECX != 0)
    exception(BX_GP_EXCEPTION, 0);

  RAX = BX_CPU_THIS_PTR pkru;
  RDX = 0;

  BX_NEXT_INSTR(i);
}

void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRPKRU(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_PKE())
    exception(BX_UD_EXCEPTION, 0);

  if ((ECX|EDX) != 0)
    exception(BX_GP_EXCEPTION, 0);

  BX_CPU_THIS_PTR set_PKeys(EAX, BX_CPU_THIS_PTR pkrs);

  BX_NEXT_TRACE(i);
}

#endif // BX_SUPPORT_PKEYS