1 /////////////////////////////////////////////////////////////////////////
2 // $Id: vmx.cc 14319 2021-07-23 10:13:48Z sshwarts $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 //   Copyright (c) 2009-2019 Stanislav Shwartsman
6 //          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
7 //
8 //  This library is free software; you can redistribute it and/or
9 //  modify it under the terms of the GNU Lesser General Public
10 //  License as published by the Free Software Foundation; either
11 //  version 2 of the License, or (at your option) any later version.
12 //
13 //  This library is distributed in the hope that it will be useful,
14 //  but WITHOUT ANY WARRANTY; without even the implied warranty of
15 //  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16 //  Lesser General Public License for more details.
17 //
18 //  You should have received a copy of the GNU Lesser General Public
19 //  License along with this library; if not, write to the Free Software
20 //  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 //
22 /////////////////////////////////////////////////////////////////////////
23 
24 #define NEED_CPU_REG_SHORTCUTS 1
25 #include "bochs.h"
26 #include "cpu.h"
27 #include "msr.h"
28 #define LOG_THIS BX_CPU_THIS_PTR
29 
30 #include "iodev/iodev.h"
31 
32 #if BX_SUPPORT_VMX
33 
34 extern VMCS_Mapping vmcs_map;
35 
36 #if BX_SUPPORT_VMX >= 2
37 extern bool isValidMSR_PAT(Bit64u pat_msr);
38 #endif
39 
40 #if BX_SUPPORT_CET
41 extern bool is_invalid_cet_control(bx_address val);
42 #endif
43 
44 extern const char *segname[];
45 
46 ////////////////////////////////////////////////////////////
47 // VMEXIT reasons for BX prints
48 ////////////////////////////////////////////////////////////
49 
50 static const char *VMX_vmexit_reason_name[] =
51 {
52   /*  0 */  "Exception or NMI",
53   /*  1 */  "External Interrupt",
54   /*  2 */  "Triple Fault",
55   /*  3 */  "INIT",
56   /*  4 */  "SIPI",
57   /*  5 */  "I/O SMI (SMM Vmexit)",
58   /*  6 */  "SMI (SMM Vmexit)",
59   /*  7 */  "Interrupt Window Exiting",
60   /*  8 */  "NMI Window Exiting",
61   /*  9 */  "Task Switch",
62   /* 10 */  "CPUID",
63   /* 11 */  "GETSEC",
64   /* 12 */  "HLT",
65   /* 13 */  "INVD",
66   /* 14 */  "INVLPG",
67   /* 15 */  "RDPMC",
68   /* 16 */  "RDTSC",
69   /* 17 */  "RSM",
70   /* 18 */  "VMCALL",
71   /* 19 */  "VMCLEAR",
72   /* 20 */  "VMLAUNCH",
73   /* 21 */  "VMPTRLD",
74   /* 22 */  "VMPTRST",
75   /* 23 */  "VMREAD",
76   /* 24 */  "VMRESUME",
77   /* 25 */  "VMWRITE",
78   /* 26 */  "VMXOFF",
79   /* 27 */  "VMXON",
80   /* 28 */  "CR Access",
81   /* 29 */  "DR Access",
82   /* 30 */  "I/O Instruction",
83   /* 31 */  "RDMSR",
84   /* 32 */  "WRMSR",
85   /* 33 */  "VMEntry failure due to invalid guest state",
86   /* 34 */  "VMEntry failure due to MSR loading",
87   /* 35 */  "Reserved35",
88   /* 36 */  "MWAIT",
89   /* 37 */  "MTF (Monitor Trap Flag)",
90   /* 38 */  "Reserved38",
91   /* 39 */  "MONITOR",
92   /* 40 */  "PAUSE",
93   /* 41 */  "VMEntry failure due to machine check",
94   /* 42 */  "Reserved42",
95   /* 43 */  "TPR Below Threshold",
96   /* 44 */  "APIC Access",
97   /* 45 */  "Virtualized EOI",
98   /* 46 */  "GDTR/IDTR Access",
99   /* 47 */  "LDTR/TR Access",
100   /* 48 */  "EPT Violation",
101   /* 49 */  "EPT Misconfiguration",
102   /* 50 */  "INVEPT",
103   /* 51 */  "RDTSCP",
104   /* 52 */  "VMX preemption timer expired",
105   /* 53 */  "INVVPID",
106   /* 54 */  "WBINVD",
107   /* 55 */  "XSETBV",
108   /* 56 */  "APIC Write Trap",
109   /* 57 */  "RDRAND",
110   /* 58 */  "INVPCID",
111   /* 59 */  "VMFUNC",
112   /* 60 */  "ENCLS",
113   /* 61 */  "RDSEED",
114   /* 62 */  "PML Log Full",
115   /* 63 */  "XSAVES",
116   /* 64 */  "XRSTORS",
117   /* 65 */  "Reserved65",
118   /* 66 */  "Sub-Page Protection",
119   /* 67 */  "UMWAIT",
120   /* 68 */  "TPAUSE",
121   /* 69 */  "Reserved69",
122   /* 70 */  "Reserved70",
123   /* 71 */  "Reserved71",
124   /* 72 */  "ENQCMD PASID Translation",
125   /* 73 */  "ENQCMDS PASID Translation",
126 };
127 
128 #include "decoder/ia_opcodes.h"
129 
130 ////////////////////////////////////////////////////////////
131 // VMCS access
132 ////////////////////////////////////////////////////////////
133 
134 void BX_CPU_C::set_VMCSPTR(Bit64u vmxptr)
135 {
136   BX_CPU_THIS_PTR vmcsptr = vmxptr;
137 
138   if (vmxptr != BX_INVALID_VMCSPTR) {
139     BX_CPU_THIS_PTR vmcshostptr = BX_CPU_THIS_PTR getHostMemAddr(vmxptr, BX_WRITE);
140 #if BX_SUPPORT_MEMTYPE
141     BX_CPU_THIS_PTR vmcs_memtype = MEMTYPE(resolve_memtype(vmxptr));
142 #endif
143   }
144   else {
145     BX_CPU_THIS_PTR vmcshostptr = 0;
146 #if BX_SUPPORT_MEMTYPE
147     BX_CPU_THIS_PTR vmcs_memtype = BX_MEMTYPE_UC;
148 #endif
149   }
150 }
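// All VMCS field accessors below follow the same pattern: the field encoding is
// translated into a byte offset within the VMCS region via vmcs_map; when the
// VMCS page is mapped into host memory (vmcshostptr != 0) the field is accessed
// directly through the host pointer, otherwise a regular guest-physical memory
// access is performed at vmcsptr + offset.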
151 
152 Bit16u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread16(unsigned encoding)
153 {
154   Bit16u field;
155 
156   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
157   if(offset >= VMX_VMCS_AREA_SIZE)
158     BX_PANIC(("VMread16: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
159   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;
160 
161   if (BX_CPU_THIS_PTR vmcshostptr) {
162     Bit16u *hostAddr = (Bit16u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
163     field = ReadHostWordFromLittleEndian(hostAddr);
164   }
165   else {
166     access_read_physical(pAddr, 2, (Bit8u*)(&field));
167   }
168 
169   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 2, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_READ, BX_VMCS_ACCESS, (Bit8u*)(&field));
170 
171   return field;
172 }
173 
174 // write 16-bit value into VMCS 16-bit field
175 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite16(unsigned encoding, Bit16u val_16)
176 {
177   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
178   if(offset >= VMX_VMCS_AREA_SIZE)
179     BX_PANIC(("VMwrite16: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
180   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;
181 
182   if (BX_CPU_THIS_PTR vmcshostptr) {
183     Bit16u *hostAddr = (Bit16u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
184     pageWriteStampTable.decWriteStamp(pAddr, 2);
185     WriteHostWordToLittleEndian(hostAddr, val_16);
186   }
187   else {
188     access_write_physical(pAddr, 2, (Bit8u*)(&val_16));
189   }
190 
191   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 2, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_WRITE, BX_VMCS_ACCESS, (Bit8u*)(&val_16));
192 }
193 
194 Bit32u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread32(unsigned encoding)
195 {
196   Bit32u field;
197 
198   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
199   if(offset >= VMX_VMCS_AREA_SIZE)
200     BX_PANIC(("VMread32: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
201   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;
202 
203   if (BX_CPU_THIS_PTR vmcshostptr) {
204     Bit32u *hostAddr = (Bit32u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
205     field = ReadHostDWordFromLittleEndian(hostAddr);
206   }
207   else {
208     access_read_physical(pAddr, 4, (Bit8u*)(&field));
209   }
210 
211   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_READ, BX_VMCS_ACCESS, (Bit8u*)(&field));
212 
213   return field;
214 }
215 
216 // write 32-bit value into VMCS field
217 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite32(unsigned encoding, Bit32u val_32)
218 {
219   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
220   if(offset >= VMX_VMCS_AREA_SIZE)
221     BX_PANIC(("VMwrite32: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
222   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;
223 
224   if (BX_CPU_THIS_PTR vmcshostptr) {
225     Bit32u *hostAddr = (Bit32u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
226     pageWriteStampTable.decWriteStamp(pAddr, 4);
227     WriteHostDWordToLittleEndian(hostAddr, val_32);
228   }
229   else {
230     access_write_physical(pAddr, 4, (Bit8u*)(&val_32));
231   }
232 
233   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_WRITE, BX_VMCS_ACCESS, (Bit8u*)(&val_32));
234 }
235 
236 Bit64u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread64(unsigned encoding)
237 {
238   BX_ASSERT(!IS_VMCS_FIELD_HI(encoding));
239 
240   Bit64u field;
241 
242   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
243   if(offset >= VMX_VMCS_AREA_SIZE)
244     BX_PANIC(("VMread64: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
245   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;
246 
247   if (BX_CPU_THIS_PTR vmcshostptr) {
248     Bit64u *hostAddr = (Bit64u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
249     field = ReadHostQWordFromLittleEndian(hostAddr);
250   }
251   else {
252     access_read_physical(pAddr, 8, (Bit8u*)(&field));
253   }
254 
255   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_READ, BX_VMCS_ACCESS, (Bit8u*)(&field));
256 
257   return field;
258 }
259 
260 // write 64-bit value into VMCS field
261 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite64(unsigned encoding, Bit64u val_64)
262 {
263   BX_ASSERT(!IS_VMCS_FIELD_HI(encoding));
264 
265   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
266   if(offset >= VMX_VMCS_AREA_SIZE)
267     BX_PANIC(("VMwrite64: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
268   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;
269 
270   if (BX_CPU_THIS_PTR vmcshostptr) {
271     Bit64u *hostAddr = (Bit64u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
272     pageWriteStampTable.decWriteStamp(pAddr, 8);
273     WriteHostQWordToLittleEndian(hostAddr, val_64);
274   }
275   else {
276     access_write_physical(pAddr, 8, (Bit8u*)(&val_64));
277   }
278 
279   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_WRITE, BX_VMCS_ACCESS, (Bit8u*)(&val_64));
280 }
281 
282 #if BX_SUPPORT_X86_64
283 BX_CPP_INLINE bx_address BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread_natural(unsigned encoding)
284 {
285   return VMread64(encoding);
286 }
287 
288 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite_natural(unsigned encoding, bx_address val)
289 {
290   VMwrite64(encoding, val);
291 }
292 #else
293 BX_CPP_INLINE bx_address BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread_natural(unsigned encoding)
294 {
295   return VMread32(encoding);
296 }
297 
298 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite_natural(unsigned encoding, bx_address val)
299 {
300   VMwrite32(encoding, val);
301 }
302 #endif
303 
304 ////////////////////////////////////////////////////////////
305 // Shadow VMCS access
306 ////////////////////////////////////////////////////////////
307 
308 #if BX_SUPPORT_VMX >= 2
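// Shadow VMCS fields live in the page referenced by the VMCS link pointer
// (vmcs.vmcs_linkptr) and are always accessed through guest-physical memory;
// unlike the ordinary VMCS accessors above, no cached host pointer is used.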
309 
310 Bit16u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread16_Shadow(unsigned encoding)
311 {
312   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
313   if(offset >= VMX_VMCS_AREA_SIZE)
314     BX_PANIC(("VMread16_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
315 
316   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
317   Bit16u field;
318   access_read_physical(pAddr, 2, (Bit8u*)(&field));
319   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 2, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&field));
320 
321   return field;
322 }
323 
324 // write 16-bit value into shadow VMCS 16-bit field
325 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite16_Shadow(unsigned encoding, Bit16u val_16)
326 {
327   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
328   if(offset >= VMX_VMCS_AREA_SIZE)
329     BX_PANIC(("VMwrite16_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
330 
331   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
332   access_write_physical(pAddr, 2, (Bit8u*)(&val_16));
333   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 2, MEMTYPE(resolve_memtype(pAddr)), BX_WRITE, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&val_16));
334 }
335 
336 Bit32u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread32_Shadow(unsigned encoding)
337 {
338   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
339   if(offset >= VMX_VMCS_AREA_SIZE)
340     BX_PANIC(("VMread32_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
341 
342   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
343   Bit32u field;
344   access_read_physical(pAddr, 4, (Bit8u*)(&field));
345   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&field));
346 
347   return field;
348 }
349 
350 // write 32-bit value into shadow VMCS field
351 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite32_Shadow(unsigned encoding, Bit32u val_32)
352 {
353   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
354   if(offset >= VMX_VMCS_AREA_SIZE)
355     BX_PANIC(("VMwrite32_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
356 
357   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
358   access_write_physical(pAddr, 4, (Bit8u*)(&val_32));
359   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, MEMTYPE(resolve_memtype(pAddr)), BX_WRITE, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&val_32));
360 }
361 
362 Bit64u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread64_Shadow(unsigned encoding)
363 {
364   BX_ASSERT(!IS_VMCS_FIELD_HI(encoding));
365 
366   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
367   if(offset >= VMX_VMCS_AREA_SIZE)
368     BX_PANIC(("VMread64_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
369 
370   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
371   Bit64u field;
372   access_read_physical(pAddr, 8, (Bit8u*)(&field));
373   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&field));
374 
375   return field;
376 }
377 
378 // write 64-bit value into shadow VMCS field
379 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite64_Shadow(unsigned encoding, Bit64u val_64)
380 {
381   BX_ASSERT(!IS_VMCS_FIELD_HI(encoding));
382 
383   unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
384   if(offset >= VMX_VMCS_AREA_SIZE)
385     BX_PANIC(("VMwrite64_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
386 
387   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
388   access_write_physical(pAddr, 8, (Bit8u*)(&val_64));
389   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)), BX_WRITE, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&val_64));
390 }
391 
392 #endif
393 
394 ////////////////////////////////////////////////////////////
395 // VMfail/VMsucceed
396 ////////////////////////////////////////////////////////////
397 
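// VMfail with a current VMCS corresponds to the SDM's VMfailValid: ZF is set and
// the error code is stored in the VM-instruction error field. Without a current
// VMCS (VMfailInvalid) only CF is set, since there is nowhere to store the code.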
398 BX_CPP_INLINE void BX_CPU_C::VMfail(Bit32u error_code)
399 {
400   clearEFlagsOSZAPC();
401 
402   if ((BX_CPU_THIS_PTR vmcsptr != BX_INVALID_VMCSPTR)) { // executed only if there is a current VMCS
403      assert_ZF();
404      VMwrite32(VMCS_32BIT_INSTRUCTION_ERROR, error_code);
405   }
406   else {
407      assert_CF();
408   }
409 }
410 
411 void BX_CPU_C::VMabort(VMX_vmabort_code error_code)
412 {
413   VMwrite32(VMCS_VMX_ABORT_FIELD_ENCODING, (Bit32u) error_code);
414 
415 #if BX_SUPPORT_VMX >= 2
416   // Deactivate VMX preemption timer
417   BX_CPU_THIS_PTR lapic.deactivate_vmx_preemption_timer();
418 #endif
419 
420   shutdown();
421 }
422 
423 Bit32u BX_CPU_C::VMXReadRevisionID(bx_phy_address pAddr)
424 {
425   unsigned revision_id_field_offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(VMCS_REVISION_ID_FIELD_ENCODING);
426   if(revision_id_field_offset >= VMX_VMCS_AREA_SIZE)
427     BX_PANIC(("Can't access VMCS_REVISION_ID encoding, offset=0x%x", revision_id_field_offset));
428 
429   Bit32u revision;
430   access_read_physical(pAddr + revision_id_field_offset, 4, &revision);
431   BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr + revision_id_field_offset, 4, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype),
432           BX_READ, BX_VMCS_ACCESS, (Bit8u*)(&revision));
433 
434   return revision;
435 }
436 
437 #if BX_SUPPORT_VMX >= 2
438 bool BX_CPU_C::is_eptptr_valid(Bit64u eptptr)
439 {
440   // [2:0] EPT paging-structure memory type
441   //       0 = Uncacheable (UC)
442   //       6 = Write-back (WB)
443   Bit32u memtype = eptptr & 7;
444   if (memtype != BX_MEMTYPE_UC && memtype != BX_MEMTYPE_WB) return 0;
445 
446   // [5:3] This value is 1 less than the EPT page-walk length
447   Bit32u walk_length = (eptptr >> 3) & 7;
448   if (walk_length != 3) return 0;
449 
450   // [6]   EPT A/D Enable
451   if (! BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT_ACCESS_DIRTY)) {
452     if (eptptr & 0x40) {
453       BX_ERROR(("is_eptptr_valid: EPTPTR A/D enabled when not supported by CPU"));
454       return 0;
455     }
456   }
457 
458   // [7]   CET: Enable supervisor shadow stack control
459 #if BX_SUPPORT_CET
460   if (! BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_CET)) {
461     if (eptptr & 0x80) {
462       BX_ERROR(("is_eptptr_valid: EPTPTR CET supervisor shadow stack control bit enabled when not supported by CPU"));
463       return 0;
464     }
465   }
466 #endif
467 
468 #define BX_EPTPTR_RESERVED_BITS 0xf00 /* bits 11:8 are reserved */
469   if (eptptr & BX_EPTPTR_RESERVED_BITS) {
470     BX_ERROR(("is_eptptr_valid: EPTPTR reserved bits set"));
471     return 0;
472   }
473 
474   if (! IsValidPhyAddr(eptptr)) return 0;
475   return 1;
476 }
477 #endif
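// For illustration only (the value follows the checks above; pml4_base stands for
// any valid page-aligned physical address): a well-formed EPTP for a 4-level walk
// with write-back paging-structure accesses encodes memtype=6 (WB) in bits [2:0]
// and walk_length-1=3 in bits [5:3]:
//   Bit64u eptptr = pml4_base | (3 << 3) | BX_MEMTYPE_WB;  // == pml4_base | 0x1E
// Bit 6 (A/D enable) and bit 7 (supervisor shadow-stack control) may be set only
// when the corresponding CPU feature is supported.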
478 
479 BX_CPP_INLINE static Bit32u rotate_r(Bit32u val_32)
480 {
481   return (val_32 >> 8) | (val_32 << 24);
482 }
483 
484 BX_CPP_INLINE static Bit32u rotate_l(Bit32u val_32)
485 {
486   return (val_32 << 8) | (val_32 >> 24);
487 }
488 
489 // AR.NULL is bit 16
490 BX_CPP_INLINE static Bit32u vmx_pack_ar_field(Bit32u ar_field, VMCS_Access_Rights_Format access_rights_format)
491 {
492   switch (access_rights_format) {
493   case VMCS_AR_ROTATE:
494     ar_field = rotate_l(ar_field);
495     break;
496   case VMCS_AR_PACK:
497     // zero out bit 11
498     ar_field &= 0xfffff7ff;
499     // Null bit (bit 16) to be stored in bit 11
500     ar_field |= ((ar_field & 0x00010000) >> 5);
501     // zero out the upper 16 bits and b8-b10
502     ar_field &= 0x0000f8ff;
503     break;
504   default:
505     break;
506   }
507 
508   return ar_field;
509 }
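// Example (packed format): an unusable (NULL) data segment with AR = 0x00010093
// packs to 0x00000893 - the NULL bit moves from bit 16 down to bit 11 and the
// upper half is dropped; vmx_unpack_ar_field() below restores 0x00010093. In the
// rotate format the access rights are simply rotated left by 8 bits instead.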
510 
511 // AR.NULL is bit 16
512 BX_CPP_INLINE static Bit32u vmx_unpack_ar_field(Bit32u ar_field, VMCS_Access_Rights_Format access_rights_format)
513 {
514   switch (access_rights_format) {
515   case VMCS_AR_ROTATE:
516     ar_field = rotate_r(ar_field);
517     break;
518   case VMCS_AR_PACK:
519     // zero out bit 16
520     ar_field &= 0xfffeffff;
521     // Null bit to be copied back from bit 11 to bit 16
522     ar_field |= ((ar_field & 0x00000800) << 5);
523     // zero out the bit 17 to bit 31
524     ar_field &= 0x0001ffff;
525     // bits 8 to 11 should be set to 0
526     ar_field &= 0xfffff0ff;
527     break;
528   default:
529     break;
530   }
531 
532   return ar_field;
533 }
534 
535 ////////////////////////////////////////////////////////////
536 // VMenter
537 ////////////////////////////////////////////////////////////
538 
539 extern struct BxExceptionInfo exceptions_info[];
540 
541 #define VMENTRY_INJECTING_EVENT(vmentry_interr_info) (vmentry_interr_info & 0x80000000)
542 
543 #define VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_LO \
544   ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_PINBASED_CTRLS_LO : VMX_MSR_VMX_PINBASED_CTRLS_LO)
545 #define VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_HI \
546   ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_PINBASED_CTRLS_HI : VMX_MSR_VMX_PINBASED_CTRLS_HI)
547 
548 #define VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_LO \
549   ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_LO : VMX_MSR_VMX_PROCBASED_CTRLS_LO)
550 #define VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_HI \
551   ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_HI : VMX_MSR_VMX_PROCBASED_CTRLS_HI)
552 
553 #define VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_LO \
554   ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_LO : VMX_MSR_VMX_VMEXIT_CTRLS_LO)
555 #define VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_HI \
556   ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_HI : VMX_MSR_VMX_VMEXIT_CTRLS_HI)
557 
558 #define VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_LO \
559   ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_LO : VMX_MSR_VMX_VMENTRY_CTRLS_LO)
560 #define VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_HI \
561   ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_HI : VMX_MSR_VMX_VMENTRY_CTRLS_HI)
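// When secondary VMX extensions are compiled in, the consistency checks below use
// the IA32_VMX_TRUE_* capability MSR values, which may permit clearing some
// default1 controls that the legacy capability MSRs report as fixed to 1.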
562 
563 VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
564 {
565   VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
566 
567   //
568   // Load VM-execution control fields to VMCS Cache
569   //
570 
571   vm->vmexec_ctrls1 = VMread32(VMCS_32BIT_CONTROL_PIN_BASED_EXEC_CONTROLS);
572   vm->vmexec_ctrls2 = VMread32(VMCS_32BIT_CONTROL_PROCESSOR_BASED_VMEXEC_CONTROLS);
573   if (VMEXIT(VMX_VM_EXEC_CTRL2_SECONDARY_CONTROLS))
574     vm->vmexec_ctrls3 = VMread32(VMCS_32BIT_CONTROL_SECONDARY_VMEXEC_CONTROLS);
575   else
576     vm->vmexec_ctrls3 = 0;
577   vm->vm_exceptions_bitmap = VMread32(VMCS_32BIT_CONTROL_EXECUTION_BITMAP);
578   vm->vm_pf_mask = VMread32(VMCS_32BIT_CONTROL_PAGE_FAULT_ERR_CODE_MASK);
579   vm->vm_pf_match = VMread32(VMCS_32BIT_CONTROL_PAGE_FAULT_ERR_CODE_MATCH);
580   vm->vm_cr0_mask = VMread_natural(VMCS_CONTROL_CR0_GUEST_HOST_MASK);
581   vm->vm_cr4_mask = VMread_natural(VMCS_CONTROL_CR4_GUEST_HOST_MASK);
582   vm->vm_cr0_read_shadow = VMread_natural(VMCS_CONTROL_CR0_READ_SHADOW);
583   vm->vm_cr4_read_shadow = VMread_natural(VMCS_CONTROL_CR4_READ_SHADOW);
584 
585   vm->vm_cr3_target_cnt = VMread32(VMCS_32BIT_CONTROL_CR3_TARGET_COUNT);
586   for (int n=0; n<VMX_CR3_TARGET_MAX_CNT; n++)
587     vm->vm_cr3_target_value[n] = VMread_natural(VMCS_CR3_TARGET0 + 2*n);
588 
589   //
590   // Check VM-execution control fields
591   //
592 
593   if (~vm->vmexec_ctrls1 & VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_LO) {
594      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX pin-based controls allowed 0-settings"));
595      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
596   }
597   if (vm->vmexec_ctrls1 & ~VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_HI) {
598      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX pin-based controls allowed 1-settings [0x%08x]", vm->vmexec_ctrls1 & ~VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_HI));
599      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
600   }
601 
602   if (~vm->vmexec_ctrls2 & VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_LO) {
603      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX proc-based controls allowed 0-settings"));
604      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
605   }
606   if (vm->vmexec_ctrls2 & ~VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_HI) {
607      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX proc-based controls allowed 1-settings [0x%08x]", vm->vmexec_ctrls2 & ~VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_HI));
608      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
609   }
610 
611   if (~vm->vmexec_ctrls3 & VMX_MSR_VMX_PROCBASED_CTRLS2_LO) {
612      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX secondary proc-based controls allowed 0-settings"));
613      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
614   }
615   if (vm->vmexec_ctrls3 & ~VMX_MSR_VMX_PROCBASED_CTRLS2_HI) {
616      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX secondary controls allowed 1-settings [0x%08x]", vm->vmexec_ctrls3 & ~VMX_MSR_VMX_PROCBASED_CTRLS2_HI));
617      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
618   }
619 
620   if (vm->vm_cr3_target_cnt > VMX_CR3_TARGET_MAX_CNT) {
621      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: too many CR3 targets %d", vm->vm_cr3_target_cnt));
622      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
623   }
624 
625   if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_IO_BITMAPS) {
626      vm->io_bitmap_addr[0] = VMread64(VMCS_64BIT_CONTROL_IO_BITMAP_A);
627      vm->io_bitmap_addr[1] = VMread64(VMCS_64BIT_CONTROL_IO_BITMAP_B);
628      // I/O bitmaps control enabled
629      for (int bitmap=0; bitmap < 2; bitmap++) {
630        if (! IsValidPageAlignedPhyAddr(vm->io_bitmap_addr[bitmap])) {
631          BX_ERROR(("VMFAIL: VMCS EXEC CTRL: I/O bitmap %c phy addr malformed", 'A' + bitmap));
632          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
633        }
634      }
635   }
636 
637   if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_MSR_BITMAPS) {
638      // MSR bitmaps control enabled
639      vm->msr_bitmap_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_MSR_BITMAPS);
640      if (! IsValidPageAlignedPhyAddr(vm->msr_bitmap_addr)) {
641        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: MSR bitmap phy addr malformed"));
642        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
643      }
644   }
645 
646   if (! (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_NMI_EXITING)) {
647      if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) {
648        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: misconfigured virtual NMI control"));
649        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
650      }
651   }
652 
653   if (! (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)) {
654      if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_NMI_WINDOW_EXITING) {
655        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: misconfigured virtual NMI control"));
656        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
657      }
658   }
659 
660 #if BX_SUPPORT_VMX >= 2
661   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VMCS_SHADOWING) {
662      vm->vmread_bitmap_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VMREAD_BITMAP_ADDR);
663      if (! IsValidPageAlignedPhyAddr(vm->vmread_bitmap_addr)) {
664        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMREAD bitmap phy addr malformed"));
665        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
666      }
667      vm->vmwrite_bitmap_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VMWRITE_BITMAP_ADDR);
668      if (! IsValidPageAlignedPhyAddr(vm->vmwrite_bitmap_addr)) {
669        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMWRITE bitmap phy addr malformed"));
670        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
671      }
672   }
673 
674   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_VIOLATION_EXCEPTION) {
675      vm->ve_info_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VE_EXCEPTION_INFO_ADDR);
676      if (! IsValidPageAlignedPhyAddr(vm->ve_info_addr)) {
677        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: broken #VE information address"));
678        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
679      }
680   }
681 #endif
682 
683 #if BX_SUPPORT_X86_64
684   if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_TPR_SHADOW) {
685      vm->virtual_apic_page_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VIRTUAL_APIC_PAGE_ADDR);
686      if (! IsValidPageAlignedPhyAddr(vm->virtual_apic_page_addr)) {
687        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtual apic phy addr malformed"));
688        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
689      }
690 
691 #if BX_SUPPORT_VMX >= 2
692      if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY) {
693        if (! PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT)) {
694          BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtual interrupt delivery must be set together with external interrupt exiting"));
695          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
696        }
697 
698        for (int reg = 0; reg < 8; reg++) {
699          vm->eoi_exit_bitmap[reg] = VMread32(VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP0 + reg);
700        }
701 
702        Bit16u guest_interrupt_status = VMread16(VMCS_16BIT_GUEST_INTERRUPT_STATUS);
703        vm->rvi = guest_interrupt_status & 0xff;
704        vm->svi = guest_interrupt_status >> 8;
705      }
706      else
707 #endif
708      {
709        vm->vm_tpr_threshold = VMread32(VMCS_32BIT_CONTROL_TPR_THRESHOLD);
710 
711        if (vm->vm_tpr_threshold & 0xfffffff0) {
712          BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TPR threshold too big"));
713          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
714        }
715 
716        if (! (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES)) {
717          Bit8u tpr_shadow = (VMX_Read_Virtual_APIC(BX_LAPIC_TPR) >> 4) & 0xf;
718          if (vm->vm_tpr_threshold > tpr_shadow) {
719            BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TPR threshold > TPR shadow"));
720            return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
721          }
722        }
723      }
724   }
725 #if BX_SUPPORT_VMX >= 2
726   else { // TPR shadow is disabled
727      if (vm->vmexec_ctrls3 & (VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE |
728                               VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_REGISTERS |
729                               VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY))
730      {
731        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: apic virtualization is enabled without TPR shadow"));
732        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
733      }
734   }
735 #endif // BX_SUPPORT_VMX >= 2
736 
737   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES) {
738      vm->apic_access_page = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_APIC_ACCESS_ADDR);
739      if (! IsValidPageAlignedPhyAddr(vm->apic_access_page)) {
740        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: apic access page phy addr malformed"));
741        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
742      }
743 
744 #if BX_SUPPORT_VMX >= 2
745      if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE) {
746        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtualize X2APIC mode enabled together with APIC access virtualization"));
747        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
748      }
749 #endif
750   }
751 
752 #if BX_SUPPORT_VMX >= 2
753   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
754      vm->eptptr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_EPTPTR);
755      if (! is_eptptr_valid(vm->eptptr)) {
756        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: invalid EPTPTR value"));
757        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
758      }
759   }
760   else {
761      if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST) {
762        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: unrestricted guest without EPT"));
763        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
764      }
765   }
766 
767   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VPID_ENABLE) {
768      vm->vpid = VMread16(VMCS_16BIT_CONTROL_VPID);
769      if (vm->vpid == 0) {
770        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: guest VPID == 0"));
771        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
772      }
773   }
774 
775   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_PAUSE_LOOP_VMEXIT) {
776      vm->ple.pause_loop_exiting_gap = VMread32(VMCS_32BIT_CONTROL_PAUSE_LOOP_EXITING_GAP);
777      vm->ple.pause_loop_exiting_window = VMread32(VMCS_32BIT_CONTROL_PAUSE_LOOP_EXITING_WINDOW);
778   }
779 
780   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VMFUNC_ENABLE)
781     vm->vmfunc_ctrls = VMread64(VMCS_64BIT_CONTROL_VMFUNC_CTRLS);
782   else
783     vm->vmfunc_ctrls = 0;
784 
785   if (vm->vmfunc_ctrls & ~VMX_VMFUNC_CTRL1_SUPPORTED_BITS) {
786      BX_ERROR(("VMFAIL: VMCS VM Functions control reserved bits set"));
787      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
788   }
789 
790   if (vm->vmfunc_ctrls & VMX_VMFUNC_EPTP_SWITCHING_MASK) {
791      if ((vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) == 0) {
792        BX_ERROR(("VMFAIL: VMFUNC EPTP-SWITCHING: EPT disabled"));
793        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
794      }
795 
796      vm->eptp_list_address = VMread64(VMCS_64BIT_CONTROL_EPTP_LIST_ADDRESS);
797      if (! IsValidPageAlignedPhyAddr(vm->eptp_list_address)) {
798        BX_ERROR(("VMFAIL: VMFUNC EPTP-SWITCHING: eptp list phy addr malformed"));
799        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
800      }
801   }
802 
803   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_PML_ENABLE) {
804     if ((vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) == 0) {
805        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: PML is enabled without EPT"));
806        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
807     }
808 
809     vm->pml_address = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_PML_ADDRESS);
810     if (! IsValidPageAlignedPhyAddr(vm->pml_address)) {
811        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: PML base phy addr malformed"));
812        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
813     }
814     vm->pml_index = VMread16(VMCS_16BIT_GUEST_PML_INDEX);
815   }
816 
817   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_SUBPAGE_WR_PROTECT_CTRL) {
818     if ((vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) == 0) {
819        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: SPP is enabled without EPT"));
820        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
821     }
822 
823     vm->spptp = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_SPPTP);
824     if (! IsValidPageAlignedPhyAddr(vm->spptp)) {
825        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: SPP base phy addr malformed"));
826        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
827     }
828   }
829 
830   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_XSAVES_XRSTORS)
831      vm->xss_exiting_bitmap = VMread64(VMCS_64BIT_CONTROL_XSS_EXITING_BITMAP);
832   else
833      vm->xss_exiting_bitmap = 0;
834 #endif
835 
836 #endif // BX_SUPPORT_X86_64
837 
838   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_TSC_SCALING) {
839      if ((vm->tsc_multiplier = VMread64(VMCS_64BIT_CONTROL_TSC_MULTIPLIER)) == 0) {
840        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TSC multiplier should be non zero"));
841        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
842      }
843   }
844 
845   //
846   // Load VM-exit control fields to VMCS Cache
847   //
848 
849   vm->vmexit_ctrls = VMread32(VMCS_32BIT_CONTROL_VMEXIT_CONTROLS);
850   vm->vmexit_msr_store_cnt = VMread32(VMCS_32BIT_CONTROL_VMEXIT_MSR_STORE_COUNT);
851   vm->vmexit_msr_load_cnt = VMread32(VMCS_32BIT_CONTROL_VMEXIT_MSR_LOAD_COUNT);
852 
853   //
854   // Check VM-exit control fields
855   //
856 
857   if (~vm->vmexit_ctrls & VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_LO) {
858      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmexit controls allowed 0-settings"));
859      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
860   }
861   if (vm->vmexit_ctrls & ~VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_HI) {
862      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmexit controls allowed 1-settings [0x%08x]", vm->vmexit_ctrls & ~VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_HI));
863      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
864   }
865 
866 #if BX_SUPPORT_VMX >= 2
867   if ((~vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VMX_PREEMPTION_TIMER_VMEXIT) && (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_VMX_PREEMPTION_TIMER)) {
868      BX_ERROR(("VMFAIL: save_VMX_preemption_timer VMEXIT control is set but VMX_preemption_timer VMEXEC control is clear"));
869      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
870   }
871 #endif
872 
873   if (vm->vmexit_msr_store_cnt > 0) {
874      vm->vmexit_msr_store_addr = VMread64(VMCS_64BIT_CONTROL_VMEXIT_MSR_STORE_ADDR);
875      if ((vm->vmexit_msr_store_addr & 0xf) != 0 || ! IsValidPhyAddr(vm->vmexit_msr_store_addr)) {
876        BX_ERROR(("VMFAIL: VMCS VMEXIT CTRL: msr store addr malformed"));
877        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
878      }
879 
880      Bit64u last_byte = vm->vmexit_msr_store_addr + (vm->vmexit_msr_store_cnt * 16) - 1;
881      if (! IsValidPhyAddr(last_byte)) {
882        BX_ERROR(("VMFAIL: VMCS VMEXIT CTRL: msr store addr too high"));
883        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
884      }
885   }
886 
887   if (vm->vmexit_msr_load_cnt > 0) {
888      vm->vmexit_msr_load_addr = VMread64(VMCS_64BIT_CONTROL_VMEXIT_MSR_LOAD_ADDR);
889      if ((vm->vmexit_msr_load_addr & 0xf) != 0 || ! IsValidPhyAddr(vm->vmexit_msr_load_addr)) {
890        BX_ERROR(("VMFAIL: VMCS VMEXIT CTRL: msr load addr malformed"));
891        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
892      }
893 
894      Bit64u last_byte = (Bit64u) vm->vmexit_msr_load_addr + (vm->vmexit_msr_load_cnt * 16) - 1;
895      if (! IsValidPhyAddr(last_byte)) {
896        BX_ERROR(("VMFAIL: VMCS VMEXIT CTRL: msr load addr too high"));
897        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
898      }
899   }
900 
901   //
902   // Load VM-entry control fields to VMCS Cache
903   //
904 
905   vm->vmentry_ctrls = VMread32(VMCS_32BIT_CONTROL_VMENTRY_CONTROLS);
906   vm->vmentry_msr_load_cnt = VMread32(VMCS_32BIT_CONTROL_VMENTRY_MSR_LOAD_COUNT);
907 
908   //
909   // Check VM-entry control fields
910   //
911 
912   if (~vm->vmentry_ctrls & VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_LO) {
913      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmentry controls allowed 0-settings"));
914      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
915   }
916   if (vm->vmentry_ctrls & ~VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_HI) {
917      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmentry controls allowed 1-settings [0x%08x]", vm->vmentry_ctrls & ~VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_HI));
918      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
919   }
920 
921   if (vm->vmentry_ctrls & VMX_VMENTRY_CTRL1_DEACTIVATE_DUAL_MONITOR_TREATMENT) {
922      if (! BX_CPU_THIS_PTR in_smm) {
923        BX_ERROR(("VMFAIL: VMENTRY from outside SMM with dual-monitor treatment enabled"));
924        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
925      }
926   }
927 
928   if (vm->vmentry_msr_load_cnt > 0) {
929      vm->vmentry_msr_load_addr = VMread64(VMCS_64BIT_CONTROL_VMENTRY_MSR_LOAD_ADDR);
930      if ((vm->vmentry_msr_load_addr & 0xf) != 0 || ! IsValidPhyAddr(vm->vmentry_msr_load_addr)) {
931        BX_ERROR(("VMFAIL: VMCS VMENTRY CTRL: msr load addr malformed"));
932        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
933      }
934 
935      Bit64u last_byte = vm->vmentry_msr_load_addr + (vm->vmentry_msr_load_cnt * 16) - 1;
936      if (! IsValidPhyAddr(last_byte)) {
937        BX_ERROR(("VMFAIL: VMCS VMENTRY CTRL: msr load addr too high"));
938        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
939      }
940   }
941 
942   //
943   // Check VM-entry event injection info
944   //
945 
946   vm->vmentry_interr_info = VMread32(VMCS_32BIT_CONTROL_VMENTRY_INTERRUPTION_INFO);
947   vm->vmentry_excep_err_code = VMread32(VMCS_32BIT_CONTROL_VMENTRY_EXCEPTION_ERR_CODE);
948   vm->vmentry_instr_length = VMread32(VMCS_32BIT_CONTROL_VMENTRY_INSTRUCTION_LENGTH);
949 
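  // VM-entry interruption-information field layout, as decoded below:
  //   [7:0]   vector of the interrupt or exception
  //   [10:8]  event type (external interrupt, NMI, hardware exception, ...)
  //   [11]    deliver error code (push vmentry_excep_err_code to the guest)
  //   [30:12] reserved, must be zero
  //   [31]    valid bit - the event is injected only when set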
950   if (VMENTRY_INJECTING_EVENT(vm->vmentry_interr_info)) {
951 
952      /* the VMENTRY is injecting an event into the guest */
953      unsigned vector = vm->vmentry_interr_info & 0xff;
954      unsigned event_type = (vm->vmentry_interr_info >>  8) & 7;
955      unsigned push_error = (vm->vmentry_interr_info >> 11) & 1;
956      unsigned error_code = push_error ? vm->vmentry_excep_err_code : 0;
957 
958      unsigned push_error_reference = 0;
959      if (event_type == BX_HARDWARE_EXCEPTION && vector < BX_CPU_HANDLED_EXCEPTIONS)
960         push_error_reference = exceptions_info[vector].push_error;
961 #if BX_SUPPORT_CET
962      if (! BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_CET)) {
963         if (vector == BX_CP_EXCEPTION) push_error_reference = false;
964      }
965 #endif
966 
967      if (vm->vmentry_interr_info & 0x7ffff000) {
968         BX_ERROR(("VMFAIL: VMENTRY broken interruption info field"));
969         return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
970      }
971 
972      switch (event_type) {
973        case BX_EXTERNAL_INTERRUPT:
974          break;
975 
976        case BX_NMI:
977          if (vector != 2) {
978            BX_ERROR(("VMFAIL: VMENTRY bad injected event vector %d", vector));
979            return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
980          }
981 /*
982          // injecting NMI
983          if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) {
984            if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED) {
985              BX_ERROR(("VMFAIL: VMENTRY injected NMI vector %d when blocked by NMI in interruptibility state", vector));
986              return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
987            }
988          }
989 */
990          break;
991 
992        case BX_HARDWARE_EXCEPTION:
993          if (vector > 31) {
994            BX_ERROR(("VMFAIL: VMENTRY bad injected event vector %d", vector));
995            return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
996          }
997          break;
998 
999        case BX_SOFTWARE_INTERRUPT:
1000        case BX_PRIVILEGED_SOFTWARE_INTERRUPT:
1001        case BX_SOFTWARE_EXCEPTION:
1002          if ((vm->vmentry_instr_length == 0 && !BX_SUPPORT_VMX_EXTENSION(BX_VMX_SW_INTERRUPT_INJECTION_ILEN_0)) ||
1003               vm->vmentry_instr_length > 15)
1004          {
1005            BX_ERROR(("VMFAIL: VMENTRY bad injected event instr length"));
1006            return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
1007          }
1008          break;
1009 
1010        case 7: /* MTF */
1011          if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_MONITOR_TRAP_FLAG)) {
1012            if (vector != 0) {
1013              BX_ERROR(("VMFAIL: VMENTRY bad MTF injection with vector=%d", vector));
1014              return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
1015            }
1016          }
1017          break;
1018 
1019        default:
1020          BX_ERROR(("VMFAIL: VMENTRY bad injected event type %d", event_type));
1021          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
1022      }
1023 
1024 #if BX_SUPPORT_VMX >= 2
1025      if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST) {
1026        unsigned protected_mode_guest = (Bit32u) VMread_natural(VMCS_GUEST_CR0) & BX_CR0_PE_MASK;
1027        if (! protected_mode_guest) push_error_reference = 0;
1028      }
1029 #endif
1030 
1031      if (! BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_CET)) {
1032        // CET added a new #CP exception with an error code, but legacy software assumed that this vector has no error code.
1033        // Therefore CET-enabled processors no longer check the error code and are able to deliver a hardware
1034        // exception with or without an error code, regardless of vector, as indicated in VMX_MSR_VMX_BASIC[56].
1035        if (push_error != push_error_reference) {
1036          BX_ERROR(("VMFAIL: VMENTRY injected event vector %d broken error code", vector));
1037          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
1038        }
1039      }
1040 
1041      if (push_error) {
1042        if (error_code & 0xffff0000) {
1043          BX_ERROR(("VMFAIL: VMENTRY bad error code 0x%08x for injected event %d", error_code, vector));
1044          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
1045        }
1046      }
1047   }
1048 
1049   return VMXERR_NO_ERROR;
1050 }
1051 
1052 VMX_error_code BX_CPU_C::VMenterLoadCheckHostState(void)
1053 {
1054   VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
1055   VMCS_HOST_STATE *host_state = &vm->host_state;
1056   bool x86_64_host = false, x86_64_guest = false;
1057 
1058   //
1059   // VM Host State Checks Related to Address-Space Size
1060   //
1061 
1062   Bit32u vmexit_ctrls = vm->vmexit_ctrls;
1063   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_HOST_ADDR_SPACE_SIZE) {
1064      x86_64_host = true;
1065   }
1066   Bit32u vmentry_ctrls = vm->vmentry_ctrls;
1067   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_X86_64_GUEST) {
1068      x86_64_guest = true;
1069   }
1070 
1071 #if BX_SUPPORT_X86_64
1072   if (long_mode()) {
1073      if (! x86_64_host) {
1074         BX_ERROR(("VMFAIL: VMCS x86-64 host control invalid on VMENTRY"));
1075         return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1076      }
1077   }
1078   else
1079 #endif
1080   {
1081      if (x86_64_host || x86_64_guest) {
1082         BX_ERROR(("VMFAIL: VMCS x86-64 guest(%d)/host(%d) controls invalid on VMENTRY", x86_64_guest, x86_64_host));
1083         return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1084      }
1085   }
1086 
1087   //
1088   // Load and Check VM Host State to VMCS Cache
1089   //
1090 
1091   host_state->cr0 = (bx_address) VMread_natural(VMCS_HOST_CR0);
1092   if (~host_state->cr0 & VMX_MSR_CR0_FIXED0) {
1093      BX_ERROR(("VMFAIL: VMCS host state invalid CR0 0x%08x", (Bit32u) host_state->cr0));
1094      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1095   }
1096 
1097   if (host_state->cr0 & ~VMX_MSR_CR0_FIXED1) {
1098      BX_ERROR(("VMFAIL: VMCS host state invalid CR0 0x%08x", (Bit32u) host_state->cr0));
1099      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1100   }
1101 
1102   host_state->cr3 = (bx_address) VMread_natural(VMCS_HOST_CR3);
1103 #if BX_SUPPORT_X86_64
1104   if (! IsValidPhyAddr(host_state->cr3)) {
1105      BX_ERROR(("VMFAIL: VMCS host state invalid CR3"));
1106      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1107   }
1108 #endif
1109 
1110   host_state->cr4 = (bx_address) VMread_natural(VMCS_HOST_CR4);
1111   if (~host_state->cr4 & VMX_MSR_CR4_FIXED0) {
1112      BX_ERROR(("VMFAIL: VMCS host state invalid CR4 0x" FMT_ADDRX, host_state->cr4));
1113      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1114   }
1115   if (host_state->cr4 & ~VMX_MSR_CR4_FIXED1) {
1116      BX_ERROR(("VMFAIL: VMCS host state invalid CR4 0x" FMT_ADDRX, host_state->cr4));
1117      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1118   }
1119 
1120   for(int n=0; n<6; n++) {
1121      host_state->segreg_selector[n] = VMread16(VMCS_16BIT_HOST_ES_SELECTOR + 2*n);
1122      if (host_state->segreg_selector[n] & 7) {
1123         BX_ERROR(("VMFAIL: VMCS host segreg %d TI/RPL != 0", n));
1124         return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1125      }
1126   }
1127 
1128   if (host_state->segreg_selector[BX_SEG_REG_CS] == 0) {
1129      BX_ERROR(("VMFAIL: VMCS host CS selector 0"));
1130      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1131   }
1132 
1133   if (! x86_64_host && host_state->segreg_selector[BX_SEG_REG_SS] == 0) {
1134      BX_ERROR(("VMFAIL: VMCS host SS selector 0"));
1135      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1136   }
1137 
1138   host_state->tr_selector = VMread16(VMCS_16BIT_HOST_TR_SELECTOR);
1139   if (! host_state->tr_selector || (host_state->tr_selector & 7) != 0) {
1140      BX_ERROR(("VMFAIL: VMCS invalid host TR selector"));
1141      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1142   }
1143 
1144   host_state->tr_base = (bx_address) VMread_natural(VMCS_HOST_TR_BASE);
1145 #if BX_SUPPORT_X86_64
1146   if (! IsCanonical(host_state->tr_base)) {
1147      BX_ERROR(("VMFAIL: VMCS host TR BASE non canonical"));
1148      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1149   }
1150 #endif
1151 
1152   host_state->fs_base = (bx_address) VMread_natural(VMCS_HOST_FS_BASE);
1153   host_state->gs_base = (bx_address) VMread_natural(VMCS_HOST_GS_BASE);
1154 #if BX_SUPPORT_X86_64
1155   if (! IsCanonical(host_state->fs_base)) {
1156      BX_ERROR(("VMFAIL: VMCS host FS BASE non canonical"));
1157      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1158   }
1159   if (! IsCanonical(host_state->gs_base)) {
1160      BX_ERROR(("VMFAIL: VMCS host GS BASE non canonical"));
1161      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1162   }
1163 #endif
1164 
1165   host_state->gdtr_base = (bx_address) VMread_natural(VMCS_HOST_GDTR_BASE);
1166   host_state->idtr_base = (bx_address) VMread_natural(VMCS_HOST_IDTR_BASE);
1167 #if BX_SUPPORT_X86_64
1168   if (! IsCanonical(host_state->gdtr_base)) {
1169      BX_ERROR(("VMFAIL: VMCS host GDTR BASE non canonical"));
1170      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1171   }
1172   if (! IsCanonical(host_state->idtr_base)) {
1173      BX_ERROR(("VMFAIL: VMCS host IDTR BASE non canonical"));
1174      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1175   }
1176 #endif
1177 
1178   host_state->sysenter_esp_msr = (bx_address) VMread_natural(VMCS_HOST_IA32_SYSENTER_ESP_MSR);
1179   host_state->sysenter_eip_msr = (bx_address) VMread_natural(VMCS_HOST_IA32_SYSENTER_EIP_MSR);
1180   host_state->sysenter_cs_msr = (Bit16u) VMread32(VMCS_32BIT_HOST_IA32_SYSENTER_CS_MSR);
1181 
1182 #if BX_SUPPORT_X86_64
1183   if (! IsCanonical(host_state->sysenter_esp_msr)) {
1184      BX_ERROR(("VMFAIL: VMCS host SYSENTER_ESP_MSR non canonical"));
1185      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1186   }
1187 
1188   if (! IsCanonical(host_state->sysenter_eip_msr)) {
1189      BX_ERROR(("VMFAIL: VMCS host SYSENTER_EIP_MSR non canonical"));
1190      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1191   }
1192 #endif
1193 
1194 #if BX_SUPPORT_VMX >= 2
1195   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_PAT_MSR) {
1196     host_state->pat_msr = VMread64(VMCS_64BIT_HOST_IA32_PAT);
1197     if (! isValidMSR_PAT(host_state->pat_msr)) {
1198       BX_ERROR(("VMFAIL: invalid Memory Type in host MSR_PAT"));
1199       return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1200     }
1201   }
1202 #endif
1203 
1204   host_state->rsp = (bx_address) VMread_natural(VMCS_HOST_RSP);
1205   host_state->rip = (bx_address) VMread_natural(VMCS_HOST_RIP);
1206 
1207 #if BX_SUPPORT_CET
1208   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_HOST_CET_STATE) {
1209     host_state->msr_ia32_s_cet = VMread_natural(VMCS_HOST_IA32_S_CET);
1210     if (!IsCanonical(host_state->msr_ia32_s_cet) || (!x86_64_host && GET32H(host_state->msr_ia32_s_cet))) {
1211        BX_ERROR(("VMFAIL: VMCS host IA32_S_CET/EB_LEG_BITMAP_BASE non canonical or invalid"));
1212        return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1213     }
1214 
1215     if (is_invalid_cet_control(host_state->msr_ia32_s_cet)) {
1216        BX_ERROR(("VMFAIL: VMCS host IA32_S_CET invalid"));
1217        return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1218     }
1219 
1220     host_state->ssp = VMread_natural(VMCS_HOST_SSP);
1221     if (!IsCanonical(host_state->ssp) || (!x86_64_host && GET32H(host_state->ssp))) {
1222        BX_ERROR(("VMFAIL: VMCS host SSP non canonical or invalid"));
1223        return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1224     }
1225     if ((host_state->ssp & 0x3) != 0) {
1226        BX_ERROR(("VMFAIL: VMCS host SSP[1:0] not zero"));
1227        return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1228     }
1229 
1230     host_state->interrupt_ssp_table_address = VMread_natural(VMCS_HOST_INTERRUPT_SSP_TABLE_ADDR);
1231     if (!IsCanonical(host_state->interrupt_ssp_table_address)) {
1232        BX_ERROR(("VMFAIL: VMCS host INTERRUPT_SSP_TABLE_ADDR non canonical or invalid"));
1233        return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1234     }
1235 
1236     if ((host_state->cr4 & BX_CR4_CET_MASK) && (host_state->cr0 & BX_CR0_WP_MASK) == 0) {
1237       BX_ERROR(("FAIL: VMCS host CR4.CET=1 when CR0.WP=0"));
1238       return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1239     }
1240   }
1241 #endif
1242 
1243 #if BX_SUPPORT_PKEYS
1244   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_HOST_PKRS) {
1245     host_state->pkrs = VMread64(VMCS_64BIT_HOST_IA32_PKRS);
1246     if (GET32H(host_state->pkrs) != 0) {
1247       BX_ERROR(("VMFAIL: invalid host IA32_PKRS value"));
1248       return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1249     }
1250   }
1251 #endif
1252 
1253 #if BX_SUPPORT_X86_64
1254 
1255 #if BX_SUPPORT_VMX >= 2
1256   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_EFER_MSR) {
1257     host_state->efer_msr = VMread64(VMCS_64BIT_HOST_IA32_EFER);
1258     if (host_state->efer_msr & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
1259       BX_ERROR(("VMFAIL: VMCS host EFER reserved bits set !"));
1260       return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1261     }
1262     bool lme = (host_state->efer_msr >>  8) & 0x1;
1263     bool lma = (host_state->efer_msr >> 10) & 0x1;
1264     if (lma != lme || lma != x86_64_host) {
1265       BX_ERROR(("VMFAIL: VMCS host EFER (0x%08x) inconsistent value !", (Bit32u) host_state->efer_msr));
1266       return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1267     }
1268   }
1269 #endif
1270 
1271   if (x86_64_host) {
1272      if ((host_state->cr4 & BX_CR4_PAE_MASK) == 0) {
1273         BX_ERROR(("VMFAIL: VMCS host CR4.PAE=0 with x86-64 host"));
1274         return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1275      }
1276      if (! IsCanonical(host_state->rip)) {
1277         BX_ERROR(("VMFAIL: VMCS host RIP non-canonical"));
1278         return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1279      }
1280   }
1281   else {
1282      if (GET32H(host_state->rip) != 0) {
1283         BX_ERROR(("VMFAIL: VMCS host RIP > 32 bit"));
1284         return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1285      }
1286      if (host_state->cr4 & BX_CR4_PCIDE_MASK) {
1287         BX_ERROR(("VMFAIL: VMCS host CR4.PCIDE set"));
1288         return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
1289      }
1290   }
1291 #endif
1292 
1293   return VMXERR_NO_ERROR;
1294 }
1295 
1296 BX_CPP_INLINE bool IsLimitAccessRightsConsistent(Bit32u limit, Bit32u ar)
1297 {
1298   bool g = (ar >> 15) & 1;
1299 
1300   // access rights reserved bits set
1301   if (ar & 0xfffe0f00) return 0;
1302 
1303   if (g) {
1304     // if any of the bits in limit[11:00] are '0 <=> G must be '0
1305     if ((limit & 0xfff) != 0xfff)
1306        return 0;
1307   }
1308   else {
1309     // if any of the bits in limit[31:20] are '1 <=> G must be '1
1310     if ((limit & 0xfff00000) != 0)
1311        return 0;
1312   }
1313 
1314   return 1;
1315 }
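// Example: a flat 4GB segment (limit = 0xffffffff) is consistent only with G=1,
// since its low 12 limit bits are all ones; a limit above 0xfffff with G=0 is
// rejected because limit[31:20] would be non-zero.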
1316 
1317 Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
1318 {
1319   int n;
1320 
1321   VMCS_GUEST_STATE guest;
1322   VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
1323 
1324   *qualification = VMENTER_ERR_NO_ERROR;
1325 
1326   //
1327   // Load and Check Guest State from VMCS
1328   //
1329 
1330   guest.rflags = VMread_natural(VMCS_GUEST_RFLAGS);
1331   // RFLAGS reserved bits [63:22], bit 15, bit 5, bit 3 must be zero
1332   if (guest.rflags & BX_CONST64(0xFFFFFFFFFFC08028)) {
1333      BX_ERROR(("VMENTER FAIL: RFLAGS reserved bits are set"));
1334      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1335   }
1336   // RFLAGS[1] must always be set
1337   if ((guest.rflags & 0x2) == 0) {
1338      BX_ERROR(("VMENTER FAIL: RFLAGS[1] cleared"));
1339      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1340   }
1341 
1342   bool v8086_guest = false;
1343   if (guest.rflags & EFlagsVMMask)
1344      v8086_guest = true;
1345 
1346   bool x86_64_guest = false; // can't be 1 if X86_64 is not supported (checked before)
1347   Bit32u vmentry_ctrls = vm->vmentry_ctrls;
1348 #if BX_SUPPORT_X86_64
1349   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_X86_64_GUEST) {
1350      BX_DEBUG(("VMENTER to x86-64 guest"));
1351      x86_64_guest = true;
1352   }
1353 #endif
1354 
1355   if (x86_64_guest && v8086_guest) {
1356      BX_ERROR(("VMENTER FAIL: Enter to x86-64 guest with RFLAGS.VM"));
1357      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1358   }
1359 
1360   guest.cr0 = VMread_natural(VMCS_GUEST_CR0);
1361 
1362 #if BX_SUPPORT_VMX >= 2
1363   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST) {
1364      if (~guest.cr0 & (VMX_MSR_CR0_FIXED0 & ~(BX_CR0_PE_MASK | BX_CR0_PG_MASK))) {
1365         BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR0"));
1366         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1367      }
1368 
1369      bool pe = (guest.cr0 & BX_CR0_PE_MASK) != 0;
1370      bool pg = (guest.cr0 & BX_CR0_PG_MASK) != 0;
1371      if (pg && !pe) {
1372         BX_ERROR(("VMENTER FAIL: VMCS unrestricted guest CR0.PG without CR0.PE"));
1373         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1374      }
1375   }
1376   else
1377 #endif
1378   {
1379      if (~guest.cr0 & VMX_MSR_CR0_FIXED0) {
1380         BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR0"));
1381         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1382      }
1383   }
1384 
1385   if (guest.cr0 & ~VMX_MSR_CR0_FIXED1) {
1386      BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR0"));
1387      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1388   }
1389 
1390 #if BX_SUPPORT_VMX >= 2
1391   bool real_mode_guest = false;
1392   if (! (guest.cr0 & BX_CR0_PE_MASK))
1393      real_mode_guest = true;
1394 #endif
1395 
1396   guest.cr3 = VMread_natural(VMCS_GUEST_CR3);
1397 #if BX_SUPPORT_X86_64
1398   if (! IsValidPhyAddr(guest.cr3)) {
1399      BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR3"));
1400      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1401   }
1402 #endif
1403 
1404   guest.cr4 = VMread_natural(VMCS_GUEST_CR4);
1405   if (~guest.cr4 & VMX_MSR_CR4_FIXED0) {
1406      BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR4"));
1407      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1408   }
1409 
1410   if (guest.cr4 & ~VMX_MSR_CR4_FIXED1) {
1411      BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR4"));
1412      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1413   }
1414 
1415 #if BX_SUPPORT_X86_64
1416   if (x86_64_guest) {
1417      if ((guest.cr4 & BX_CR4_PAE_MASK) == 0) {
1418         BX_ERROR(("VMENTER FAIL: VMCS guest CR4.PAE=0 in x86-64 mode"));
1419         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1420      }
1421   }
1422   else {
1423      if (guest.cr4 & BX_CR4_PCIDE_MASK) {
1424         BX_ERROR(("VMENTER FAIL: VMCS CR4.PCIDE set in 32-bit guest"));
1425         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1426      }
1427   }
1428 
1429   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_DBG_CTRLS) {
1430      guest.dr7 = VMread_natural(VMCS_GUEST_DR7);
1431      if (GET32H(guest.dr7)) {
1432         BX_ERROR(("VMENTER FAIL: VMCS guest invalid DR7"));
1433         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1434      }
1435   }
1436 #endif
1437 
1438 #if BX_SUPPORT_CET
1439   if ((guest.cr4 & BX_CR4_CET_MASK) && (guest.cr0 & BX_CR0_WP_MASK) == 0) {
1440     BX_ERROR(("VMENTER FAIL: VMCS guest CR4.CET=1 when CR0.WP=0"));
1441     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1442   }
1443 
1444   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_GUEST_CET_STATE) {
1445     guest.msr_ia32_s_cet = VMread_natural(VMCS_GUEST_IA32_S_CET);
1446     if (!IsCanonical(guest.msr_ia32_s_cet) || (!x86_64_guest && GET32H(guest.msr_ia32_s_cet))) {
1447        BX_ERROR(("VMFAIL: VMCS guest IA32_S_CET/EB_LEG_BITMAP_BASE non canonical or invalid"));
1448        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1449     }
1450 
1451     if (is_invalid_cet_control(guest.msr_ia32_s_cet)) {
1452        BX_ERROR(("VMFAIL: VMCS guest IA32_S_CET invalid"));
1453        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1454     }
1455 
1456     guest.ssp = VMread_natural(VMCS_GUEST_SSP);
1457     if (!IsCanonical(guest.ssp) || (!x86_64_guest && GET32H(guest.ssp))) {
1458        BX_ERROR(("VMFAIL: VMCS guest SSP non canonical or invalid"));
1459        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1460     }
1461     if ((guest.ssp & 0x3) != 0) {
1462        BX_ERROR(("VMFAIL: VMCS guest SSP[1:0] not zero"));
1463        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1464     }
1465 
1466     guest.interrupt_ssp_table_address = VMread_natural(VMCS_GUEST_INTERRUPT_SSP_TABLE_ADDR);
1467     if (!IsCanonical(guest.interrupt_ssp_table_address)) {
1468        BX_ERROR(("VMFAIL: VMCS guest INTERRUPT_SSP_TABLE_ADDR non canonical or invalid"));
1469        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1470     }
1471   }
1472 #endif
1473 
1474 #if BX_SUPPORT_PKEYS
1475   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_GUEST_PKRS) {
1476     guest.pkrs = VMread64(VMCS_64BIT_GUEST_IA32_PKRS);
1477     if (GET32H(guest.pkrs) != 0) {
1478       BX_ERROR(("VMFAIL: invalid guest IA32_PKRS value"));
1479       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1480     }
1481   }
1482 #endif
1483 
1484   //
1485   // Load and Check Guest State from VMCS - Segment Registers
1486   //
1487 
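  // For each of the six segment registers the selector, base, limit and
  // access-rights fields are read from the VMCS; bit 16 of the unpacked AR
  // field marks the register as unusable. A v8086 guest must have the usual
  // real-mode-style values (base = selector<<4, limit = 0xFFFF, AR = 0xF3).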
1488   for (n=0; n<6; n++) {
1489      Bit16u selector = VMread16(VMCS_16BIT_GUEST_ES_SELECTOR + 2*n);
1490      bx_address base = (bx_address) VMread_natural(VMCS_GUEST_ES_BASE + 2*n);
1491      Bit32u limit = VMread32(VMCS_32BIT_GUEST_ES_LIMIT + 2*n);
1492      Bit32u ar = VMread32(VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS + 2*n);
1493      ar = vmx_unpack_ar_field(ar, BX_CPU_THIS_PTR vmcs_map->get_access_rights_format());
1494      bool invalid = (ar >> 16) & 1;
1495 
1496      set_segment_ar_data(&guest.sregs[n], !invalid,
1497                   (Bit16u) selector, base, limit, (Bit16u) ar);
1498 
1499      if (v8086_guest) {
1500         // guest in V8086 mode
1501         if (base != ((bx_address)(selector << 4))) {
1502           BX_ERROR(("VMENTER FAIL: VMCS v8086 guest bad %s.BASE", segname[n]));
1503           return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1504         }
1505         if (limit != 0xffff) {
1506           BX_ERROR(("VMENTER FAIL: VMCS v8086 guest %s.LIMIT != 0xFFFF", segname[n]));
1507           return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1508         }
1509         // present, expand-up read/write accessed, segment, DPL=3
1510         if (ar != 0xF3) {
1511           BX_ERROR(("VMENTER FAIL: VMCS v8086 guest %s.AR != 0xF3", segname[n]));
1512           return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1513         }
1514 
1515         continue; // go to next segment register
1516      }
1517 
1518 #if BX_SUPPORT_X86_64
1519      if (n >= BX_SEG_REG_FS) {
1520         if (! IsCanonical(base)) {
1521           BX_ERROR(("VMENTER FAIL: VMCS guest %s.BASE non canonical", segname[n]));
1522           return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1523         }
1524      }
1525 #endif
1526 
1527      if (n != BX_SEG_REG_CS && invalid)
1528         continue;
1529 
1530 #if BX_SUPPORT_X86_64
1531      if (n == BX_SEG_REG_SS && (selector & BX_SELECTOR_RPL_MASK) == 0) {
1532         // SS is allowed to be NULL selector if going to 64-bit guest
1533         if (x86_64_guest && guest.sregs[BX_SEG_REG_CS].cache.u.segment.l)
1534            continue;
1535      }
1536 
1537      if (n < BX_SEG_REG_FS) {
1538         if (GET32H(base) != 0) {
1539           BX_ERROR(("VMENTER FAIL: VMCS guest %s.BASE > 32 bit", segname[n]));
1540           return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1541         }
1542      }
1543 #endif
1544 
1545      if (! guest.sregs[n].cache.segment) {
1546         BX_ERROR(("VMENTER FAIL: VMCS guest %s not segment", segname[n]));
1547         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1548      }
1549 
1550      if (! guest.sregs[n].cache.p) {
1551         BX_ERROR(("VMENTER FAIL: VMCS guest %s not present", segname[n]));
1552         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1553      }
1554 
1555      if (! IsLimitAccessRightsConsistent(limit, ar)) {
1556         BX_ERROR(("VMENTER FAIL: VMCS guest %s.AR/LIMIT malformed", segname[n]));
1557         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1558      }
1559 
1560      if (n == BX_SEG_REG_CS) {
1561         // CS checks
1562         switch (guest.sregs[BX_SEG_REG_CS].cache.type) {
1563           case BX_CODE_EXEC_ONLY_ACCESSED:
1564           case BX_CODE_EXEC_READ_ACCESSED:
1565              // non-conforming segment
1566              if (guest.sregs[BX_SEG_REG_CS].selector.rpl != guest.sregs[BX_SEG_REG_CS].cache.dpl) {
1567                BX_ERROR(("VMENTER FAIL: VMCS guest non-conforming CS.RPL <> CS.DPL"));
1568                return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1569              }
1570              break;
1571           case BX_CODE_EXEC_ONLY_CONFORMING_ACCESSED:
1572           case BX_CODE_EXEC_READ_CONFORMING_ACCESSED:
1573              // conforming segment
1574              if (guest.sregs[BX_SEG_REG_CS].selector.rpl < guest.sregs[BX_SEG_REG_CS].cache.dpl) {
1575                BX_ERROR(("VMENTER FAIL: VMCS guest conforming CS.RPL < CS.DPL"));
1576                return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1577              }
1578              break;
1579 #if BX_SUPPORT_VMX >= 2
1580           case BX_DATA_READ_WRITE_ACCESSED:
1581              if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST) {
1582                if (guest.sregs[BX_SEG_REG_CS].cache.dpl != 0) {
1583                  BX_ERROR(("VMENTER FAIL: VMCS unrestricted guest CS.DPL != 0"));
1584                  return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1585                }
1586                break;
1587              }
1588              // fall through
1589 #endif
1590           default:
1591              BX_ERROR(("VMENTER FAIL: VMCS guest CS.TYPE"));
1592              return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1593         }
1594 
1595 #if BX_SUPPORT_X86_64
1596         if (x86_64_guest) {
1597           if (guest.sregs[BX_SEG_REG_CS].cache.u.segment.d_b && guest.sregs[BX_SEG_REG_CS].cache.u.segment.l) {
1598              BX_ERROR(("VMENTER FAIL: VMCS x86_64 guest wrong CS.D_B/L"));
1599              return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1600           }
1601         }
1602 #endif
1603      }
1604      else if (n == BX_SEG_REG_SS) {
1605         // SS checks
1606         switch (guest.sregs[BX_SEG_REG_SS].cache.type) {
1607           case BX_DATA_READ_WRITE_ACCESSED:
1608           case BX_DATA_READ_WRITE_EXPAND_DOWN_ACCESSED:
1609              break;
1610           default:
1611              BX_ERROR(("VMENTER FAIL: VMCS guest SS.TYPE"));
1612              return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1613         }
1614      }
1615      else {
1616         // DS, ES, FS, GS
1617         if ((guest.sregs[n].cache.type & 0x1) == 0) {
1618            BX_ERROR(("VMENTER FAIL: VMCS guest %s not ACCESSED", segname[n]));
1619            return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1620         }
1621 
1622         if (guest.sregs[n].cache.type & 0x8) {
1623            if ((guest.sregs[n].cache.type & 0x2) == 0) {
1624               BX_ERROR(("VMENTER FAIL: VMCS guest CODE segment %s not READABLE", segname[n]));
1625               return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1626            }
1627         }
1628 
1629         if (! (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST)) {
1630            if (guest.sregs[n].cache.type < 11) {
1631               // data segment or non-conforming code segment
1632               if (guest.sregs[n].selector.rpl > guest.sregs[n].cache.dpl) {
1633                 BX_ERROR(("VMENTER FAIL: VMCS guest non-conforming %s.RPL > %s.DPL", segname[n], segname[n]));
1634                 return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1635               }
1636            }
1637         }
1638      }
1639   }
1640 
1641   if (! v8086_guest) {
1642      if (! (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST)) {
1643         if (guest.sregs[BX_SEG_REG_SS].selector.rpl != guest.sregs[BX_SEG_REG_CS].selector.rpl) {
1644            BX_ERROR(("VMENTER FAIL: VMCS guest CS.RPL != SS.RPL"));
1645            return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1646         }
1647         if (guest.sregs[BX_SEG_REG_SS].selector.rpl != guest.sregs[BX_SEG_REG_SS].cache.dpl) {
1648            BX_ERROR(("VMENTER FAIL: VMCS guest SS.RPL <> SS.DPL"));
1649            return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1650         }
1651      }
1652 #if BX_SUPPORT_VMX >= 2
1653      else { // unrestricted guest
1654         if (real_mode_guest || guest.sregs[BX_SEG_REG_CS].cache.type == BX_DATA_READ_WRITE_ACCESSED) {
1655            if (guest.sregs[BX_SEG_REG_SS].cache.dpl != 0) {
1656              BX_ERROR(("VMENTER FAIL: VMCS unrestricted guest SS.DPL != 0"));
1657              return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1658            }
1659         }
1660      }
1661 #endif
1662   }
1663 
1664   //
1665   // Load and Check Guest State from VMCS - GDTR/IDTR
1666   //
1667 
1668   Bit64u gdtr_base = VMread_natural(VMCS_GUEST_GDTR_BASE);
1669   Bit32u gdtr_limit = VMread32(VMCS_32BIT_GUEST_GDTR_LIMIT);
1670   Bit64u idtr_base = VMread_natural(VMCS_GUEST_IDTR_BASE);
1671   Bit32u idtr_limit = VMread32(VMCS_32BIT_GUEST_IDTR_LIMIT);
1672 
1673 #if BX_SUPPORT_X86_64
1674   if (! IsCanonical(gdtr_base) || ! IsCanonical(idtr_base)) {
1675     BX_ERROR(("VMENTER FAIL: VMCS guest GDTR/IDTR.BASE non canonical"));
1676     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1677   }
1678 #endif
1679   if (gdtr_limit > 0xffff || idtr_limit > 0xffff) {
1680      BX_ERROR(("VMENTER FAIL: VMCS guest GDTR/IDTR limit > 0xFFFF"));
1681      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1682   }
1683 
1684   //
1685   // Load and Check Guest State from VMCS - LDTR
1686   //
1687 
1688   Bit16u ldtr_selector = VMread16(VMCS_16BIT_GUEST_LDTR_SELECTOR);
1689   Bit64u ldtr_base = VMread_natural(VMCS_GUEST_LDTR_BASE);
1690   Bit32u ldtr_limit = VMread32(VMCS_32BIT_GUEST_LDTR_LIMIT);
1691   Bit32u ldtr_ar = VMread32(VMCS_32BIT_GUEST_LDTR_ACCESS_RIGHTS);
1692   ldtr_ar = vmx_unpack_ar_field(ldtr_ar, BX_CPU_THIS_PTR vmcs_map->get_access_rights_format());
1693   bool ldtr_invalid = (ldtr_ar >> 16) & 1;
1694   if (set_segment_ar_data(&guest.ldtr, !ldtr_invalid,
1695          (Bit16u) ldtr_selector, ldtr_base, ldtr_limit, (Bit16u)(ldtr_ar)))
1696   {
1697      // ldtr is valid
1698      if (guest.ldtr.selector.ti) {
1699         BX_ERROR(("VMENTER FAIL: VMCS guest LDTR.TI set"));
1700         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1701      }
1702      if (guest.ldtr.cache.type != BX_SYS_SEGMENT_LDT) {
1703         BX_ERROR(("VMENTER FAIL: VMCS guest incorrect LDTR type (%d)", guest.ldtr.cache.type));
1704         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1705      }
1706      if (guest.ldtr.cache.segment) {
1707         BX_ERROR(("VMENTER FAIL: VMCS guest LDTR is not system segment"));
1708         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1709      }
1710      if (! guest.ldtr.cache.p) {
1711         BX_ERROR(("VMENTER FAIL: VMCS guest LDTR not present"));
1712         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1713      }
1714      if (! IsLimitAccessRightsConsistent(ldtr_limit, ldtr_ar)) {
1715         BX_ERROR(("VMENTER FAIL: VMCS guest LDTR.AR/LIMIT malformed"));
1716         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1717      }
1718 #if BX_SUPPORT_X86_64
1719      if (! IsCanonical(ldtr_base)) {
1720         BX_ERROR(("VMENTER FAIL: VMCS guest LDTR.BASE non canonical"));
1721         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1722      }
1723 #endif
1724   }
1725 
1726   //
1727   // Load and Check Guest State from VMCS - TR
1728   //
1729 
1730   Bit16u tr_selector = VMread16(VMCS_16BIT_GUEST_TR_SELECTOR);
1731   Bit64u tr_base = VMread_natural(VMCS_GUEST_TR_BASE);
1732   Bit32u tr_limit = VMread32(VMCS_32BIT_GUEST_TR_LIMIT);
1733   Bit32u tr_ar = VMread32(VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS);
1734   tr_ar = vmx_unpack_ar_field(tr_ar, BX_CPU_THIS_PTR vmcs_map->get_access_rights_format());
1735   bool tr_invalid = (tr_ar >> 16) & 1;
1736 
1737 #if BX_SUPPORT_X86_64
1738   if (! IsCanonical(tr_base)) {
1739     BX_ERROR(("VMENTER FAIL: VMCS guest TR.BASE non canonical"));
1740     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1741   }
1742 #endif
1743 
1744   set_segment_ar_data(&guest.tr, !tr_invalid,
1745       (Bit16u) tr_selector, tr_base, tr_limit, (Bit16u)(tr_ar));
1746 
1747   if (tr_invalid) {
1748      BX_ERROR(("VMENTER FAIL: VMCS guest TR invalid"));
1749      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1750   }
1751   if (guest.tr.selector.ti) {
1752      BX_ERROR(("VMENTER FAIL: VMCS guest TR.TI set"));
1753      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1754   }
1755   if (guest.tr.cache.segment) {
1756      BX_ERROR(("VMENTER FAIL: VMCS guest TR is not system segment"));
1757      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1758   }
1759   if (! guest.tr.cache.p) {
1760      BX_ERROR(("VMENTER FAIL: VMCS guest TR not present"));
1761      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1762   }
1763   if (! IsLimitAccessRightsConsistent(tr_limit, tr_ar)) {
1764      BX_ERROR(("VMENTER FAIL: VMCS guest TR.AR/LIMIT malformed"));
1765      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1766   }
1767 
1768   switch(guest.tr.cache.type) {
1769     case BX_SYS_SEGMENT_BUSY_386_TSS:
1770       break;
1771     case BX_SYS_SEGMENT_BUSY_286_TSS:
1772       if (! x86_64_guest) break;
1773       // fall through
1774     default:
1775       BX_ERROR(("VMENTER FAIL: VMCS guest incorrect TR type"));
1776       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1777   }
1778 
1779   //
1780   // Load and Check Guest State from VMCS - MSRS
1781   //
1782 
1783   guest.ia32_debugctl_msr = VMread64(VMCS_64BIT_GUEST_IA32_DEBUGCTL);
1784   guest.smbase = VMread32(VMCS_32BIT_GUEST_SMBASE);
1785 
1786   guest.sysenter_esp_msr = VMread_natural(VMCS_GUEST_IA32_SYSENTER_ESP_MSR);
1787   guest.sysenter_eip_msr = VMread_natural(VMCS_GUEST_IA32_SYSENTER_EIP_MSR);
1788   guest.sysenter_cs_msr = VMread32(VMCS_32BIT_GUEST_IA32_SYSENTER_CS_MSR);
1789 
1790 #if BX_SUPPORT_X86_64
1791   if (! IsCanonical(guest.sysenter_esp_msr)) {
1792     BX_ERROR(("VMENTER FAIL: VMCS guest SYSENTER_ESP_MSR non canonical"));
1793     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1794   }
1795   if (! IsCanonical(guest.sysenter_eip_msr)) {
1796     BX_ERROR(("VMENTER FAIL: VMCS guest SYSENTER_EIP_MSR non canonical"));
1797     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1798   }
1799 #endif
1800 
1801 #if BX_SUPPORT_VMX >= 2
1802   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_PAT_MSR) {
1803     guest.pat_msr = VMread64(VMCS_64BIT_GUEST_IA32_PAT);
1804     if (! isValidMSR_PAT(guest.pat_msr)) {
1805       BX_ERROR(("VMENTER FAIL: invalid Memory Type in guest MSR_PAT"));
1806       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1807     }
1808   }
1809 #endif
1810 
1811   guest.rip = VMread_natural(VMCS_GUEST_RIP);
1812   guest.rsp = VMread_natural(VMCS_GUEST_RSP);
1813 
1814 #if BX_SUPPORT_VMX >= 2 && BX_SUPPORT_X86_64
1815   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_EFER_MSR) {
1816     guest.efer_msr = VMread64(VMCS_64BIT_GUEST_IA32_EFER);
1817     if (guest.efer_msr & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
1818       BX_ERROR(("VMENTER FAIL: VMCS guest EFER reserved bits set !"));
1819       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1820     }
1821     bool lme = (guest.efer_msr >>  8) & 0x1;
1822     bool lma = (guest.efer_msr >> 10) & 0x1;
1823     if (lma != x86_64_guest) {
1824       BX_ERROR(("VMENTER FAIL: VMCS guest EFER.LMA doesn't match x86_64_guest !"));
1825       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1826     }
1827     if (lma != lme && (guest.cr0 & BX_CR0_PG_MASK) != 0) {
1828       BX_ERROR(("VMENTER FAIL: VMCS guest EFER (0x%08x) inconsistent value !", (Bit32u) guest.efer_msr));
1829       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1830     }
1831   }
1832 
1833   if (! x86_64_guest || !guest.sregs[BX_SEG_REG_CS].cache.u.segment.l) {
1834     if (GET32H(guest.rip) != 0) {
1835        BX_ERROR(("VMENTER FAIL: VMCS guest RIP > 32 bit"));
1836        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1837     }
1838   }
1839 #endif
1840 
1841   //
1842   // Load and Check Guest Non-Registers State from VMCS
1843   //
1844 
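  // A VMCS link pointer other than all-ones must be page aligned, reference a
  // VMCS with the expected revision ID (with the shadow bit set when VMCS
  // shadowing is enabled) and must not equal the current VMCS pointer (or the
  // VMXON pointer, for VM entries made in SMM without the "entry to SMM" control).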
1845   vm->vmcs_linkptr = VMread64(VMCS_64BIT_GUEST_LINK_POINTER);
1846   if (vm->vmcs_linkptr != BX_INVALID_VMCSPTR) {
1847     if (! IsValidPageAlignedPhyAddr(vm->vmcs_linkptr)) {
1848       *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
1849       BX_ERROR(("VMFAIL: VMCS link pointer malformed"));
1850       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1851     }
1852 
1853     Bit32u revision = VMXReadRevisionID((bx_phy_address) vm->vmcs_linkptr);
1854     if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VMCS_SHADOWING) {
1855       if ((revision & BX_VMCS_SHADOW_BIT_MASK) == 0) {
1856         *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
1857         BX_ERROR(("VMFAIL: VMCS link pointer must indicate shadow VMCS revision ID = %d", revision));
1858         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1859       }
1860       revision &= ~BX_VMCS_SHADOW_BIT_MASK;
1861     }
1862     if (revision != BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()) {
1863       *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
1864       BX_ERROR(("VMFAIL: VMCS link pointer incorrect revision ID %d != %d", revision, BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()));
1865       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1866     }
1867 
1868     if (! BX_CPU_THIS_PTR in_smm || (vmentry_ctrls & VMX_VMENTRY_CTRL1_SMM_ENTER) != 0) {
1869       if (vm->vmcs_linkptr == BX_CPU_THIS_PTR vmcsptr) {
1870         *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
1871         BX_ERROR(("VMFAIL: VMCS link pointer equal to current VMCS pointer"));
1872         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1873       }
1874     }
1875     else {
1876       if (vm->vmcs_linkptr == BX_CPU_THIS_PTR vmxonptr) {
1877         *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
1878         BX_ERROR(("VMFAIL: VMCS link pointer equal to VMXON pointer"));
1879         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1880       }
1881     }
1882   }
1883 
1884   guest.tmpDR6 = (Bit32u) VMread_natural(VMCS_GUEST_PENDING_DBG_EXCEPTIONS);
1885   if (guest.tmpDR6 & BX_CONST64(0xFFFFFFFFFFFFAFF0)) {
1886     BX_ERROR(("VMENTER FAIL: VMCS guest tmpDR6 reserved bits"));
1887     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1888   }
1889 
1890   guest.activity_state = VMread32(VMCS_32BIT_GUEST_ACTIVITY_STATE);
1891   if (guest.activity_state > BX_VMX_LAST_ACTIVITY_STATE) {
1892     BX_ERROR(("VMENTER FAIL: VMCS guest activity state %d", guest.activity_state));
1893     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1894   }
1895 
1896   if (guest.activity_state == BX_ACTIVITY_STATE_HLT) {
1897     if (guest.sregs[BX_SEG_REG_SS].cache.dpl != 0) {
1898       BX_ERROR(("VMENTER FAIL: VMCS guest HLT state with SS.DPL=%d", guest.sregs[BX_SEG_REG_SS].cache.dpl));
1899       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1900     }
1901   }
1902 
1903   guest.interruptibility_state = VMread32(VMCS_32BIT_GUEST_INTERRUPTIBILITY_STATE);
1904   if (guest.interruptibility_state & ~BX_VMX_INTERRUPTIBILITY_STATE_MASK) {
1905     BX_ERROR(("VMENTER FAIL: VMCS guest interruptibility state broken"));
1906     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1907   }
1908 
1909   if (guest.interruptibility_state & 0x3) {
1910     if (guest.activity_state != BX_ACTIVITY_STATE_ACTIVE) {
1911       BX_ERROR(("VMENTER FAIL: VMCS guest interruptibility state broken when entering non active CPU state %d", guest.activity_state));
1912       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1913     }
1914   }
1915 
1916   if ((guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_STI) &&
1917       (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_MOV_SS))
1918   {
1919     BX_ERROR(("VMENTER FAIL: VMCS guest interruptibility state broken"));
1920     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1921   }
1922 
1923   if ((guest.rflags & EFlagsIFMask) == 0) {
1924     if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_STI) {
1925       BX_ERROR(("VMENTER FAIL: VMCS guest interrupts can't be blocked by STI when EFLAGS.IF = 0"));
1926       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1927     }
1928   }
1929 
1930   if (VMENTRY_INJECTING_EVENT(vm->vmentry_interr_info)) {
1931     unsigned event_type = (vm->vmentry_interr_info >> 8) & 7;
1932     unsigned vector = vm->vmentry_interr_info & 0xff;
1933     if (event_type == BX_EXTERNAL_INTERRUPT) {
1934       if ((guest.interruptibility_state & 0x3) != 0 || (guest.rflags & EFlagsIFMask) == 0) {
1935         BX_ERROR(("VMENTER FAIL: VMCS guest interrupts blocked when injecting external interrupt"));
1936         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1937       }
1938     }
1939     if (event_type == BX_NMI) {
1940       if ((guest.interruptibility_state & 0x3) != 0) {
1941         BX_ERROR(("VMENTER FAIL: VMCS guest interrupts blocked when injecting NMI"));
1942         return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1943       }
1944     }
1945     if (guest.activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
1946       BX_ERROR(("VMENTER FAIL: No guest interruptions are allowed when entering Wait-For-Sipi state"));
1947       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1948     }
1949     if (guest.activity_state == BX_ACTIVITY_STATE_SHUTDOWN && event_type != BX_NMI && vector != BX_MC_EXCEPTION) {
1950       BX_ERROR(("VMENTER FAIL: Only NMI or #MC guest interruption is allowed when entering shutdown state"));
1951       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1952     }
1953   }
1954 
1955   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_SMM_ENTER) {
1956     if (! (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_SMI_BLOCKED)) {
1957       BX_ERROR(("VMENTER FAIL: VMCS SMM guest should block SMI"));
1958       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1959     }
1960 
1961     if (guest.activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
1962       BX_ERROR(("VMENTER FAIL: The activity state must not indicate the wait-for-SIPI state when entering an SMM guest"));
1963       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1964     }
1965   }
1966 
1967   if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_SMI_BLOCKED) {
1968     if (! BX_CPU_THIS_PTR in_smm) {
1969       BX_ERROR(("VMENTER FAIL: VMCS SMI blocked when not in SMM mode"));
1970       return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1971     }
1972   }
1973 
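  // For a 32-bit guest with PAE paging enabled (CR4.PAE=1, CR0.PG=1) the four
  // PDPTEs must be valid: with EPT they are taken from the VMCS guest PDPTE
  // fields, otherwise they are read and checked through guest CR3.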
1974   if (! x86_64_guest && (guest.cr4 & BX_CR4_PAE_MASK) != 0 && (guest.cr0 & BX_CR0_PG_MASK) != 0) {
1975 #if BX_SUPPORT_VMX >= 2
1976     if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
1977       for (n=0;n<4;n++)
1978          guest.pdptr[n] = VMread64(VMCS_64BIT_GUEST_IA32_PDPTE0 + 2*n);
1979 
1980       if (! CheckPDPTR(guest.pdptr)) {
1981          *qualification = VMENTER_ERR_GUEST_STATE_PDPTR_LOADING;
1982          BX_ERROR(("VMENTER: EPT Guest State PDPTRs Checks Failed"));
1983          return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1984       }
1985     }
1986     else
1987 #endif
1988     {
1989       if (! CheckPDPTR(guest.cr3)) {
1990          *qualification = VMENTER_ERR_GUEST_STATE_PDPTR_LOADING;
1991          BX_ERROR(("VMENTER: Guest State PDPTRs Checks Failed"));
1992          return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
1993       }
1994     }
1995   }
1996 
1997   //
1998   // Load Guest State -> VMENTER
1999   //
2000 
2001 #if BX_SUPPORT_X86_64
2002 #if BX_SUPPORT_VMX >= 2
2003   // modify EFER.LMA / EFER.LME before setting CR4
2004 
2005   // It is recommended that 64-bit VMM software use the 1-settings of the "load IA32_EFER"
2006   // VM entry control and the "save IA32_EFER" VM-exit control. If VMentry is establishing
2007   // CR0.PG=0 and if the "IA-32e mode guest" and "load IA32_EFER" VM entry controls are
2008   // both 0, VM entry leaves IA32_EFER.LME unmodified (i.e., the host value will persist
2009   // in the guest) -- Quote from Intel SDM
2010   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_EFER_MSR) {
2011      BX_CPU_THIS_PTR efer.set32((Bit32u) guest.efer_msr);
2012   }
2013   else
2014 #endif
2015   {
2016     if (x86_64_guest) {
2017       BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() | (BX_EFER_LME_MASK | BX_EFER_LMA_MASK));
2018     }
2019     else {
2020       // when loading unrestricted guest with CR0.PG=0 EFER.LME is unmodified
2021       // (i.e., the host value will persist in the guest)
2022       if (guest.cr0 & BX_CR0_PG_MASK)
2023         BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() & ~(BX_EFER_LME_MASK | BX_EFER_LMA_MASK));
2024       else
2025         BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() &  ~BX_EFER_LMA_MASK);
2026     }
2027   }
2028 #endif
2029 
2030 // keep bits ET(4), reserved bits 15:6, 17, 28:19, NW(29), CD(30)
2031 #define VMX_KEEP_CR0_BITS 0x7FFAFFD0
2032 
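  // Merge guest CR0 from the VMCS with the current CR0: bits covered by
  // VMX_KEEP_CR0_BITS keep their present value, all other bits are taken
  // from the guest-state area.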
2033   guest.cr0 = (BX_CPU_THIS_PTR cr0.get32() & VMX_KEEP_CR0_BITS) | (guest.cr0 & ~VMX_KEEP_CR0_BITS);
2034 
2035   if (! check_CR0(guest.cr0)) {
2036     BX_PANIC(("VMENTER CR0 is broken !"));
2037   }
2038   if (! check_CR4(guest.cr4)) {
2039     BX_PANIC(("VMENTER CR4 is broken !"));
2040   }
2041 
2042   BX_CPU_THIS_PTR cr0.set32((Bit32u) guest.cr0);
2043   BX_CPU_THIS_PTR cr4.set32((Bit32u) guest.cr4);
2044   BX_CPU_THIS_PTR cr3 = guest.cr3;
2045 
2046 #if BX_SUPPORT_VMX >= 2
2047   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
2048     // load PDPTR only in PAE legacy mode
2049     if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !x86_64_guest) {
2050       for (n = 0; n < 4; n++)
2051         BX_CPU_THIS_PTR PDPTR_CACHE.entry[n] = guest.pdptr[n];
2052     }
2053   }
2054 #endif
2055 
2056   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_DBG_CTRLS) {
2057     // always clear bits 15:14 and set bit 10
2058     BX_CPU_THIS_PTR dr7.set32((guest.dr7 & ~0xc000) | 0x400);
2059   }
2060 
2061   RIP = BX_CPU_THIS_PTR prev_rip = guest.rip;
2062   RSP = guest.rsp;
2063 
2064 #if BX_SUPPORT_CET
2065   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_GUEST_CET_STATE) {
2066     SSP = guest.ssp;
2067     BX_CPU_THIS_PTR msr.ia32_interrupt_ssp_table = guest.interrupt_ssp_table_address;
2068     BX_CPU_THIS_PTR msr.ia32_cet_control[0] = guest.msr_ia32_s_cet;
2069   }
2070 #endif
2071 
2072 #if BX_SUPPORT_PKEYS
2073   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_GUEST_PKRS) {
2074     set_PKeys(BX_CPU_THIS_PTR pkru, guest.pkrs);
2075   }
2076 #endif
2077 
2078   BX_CPU_THIS_PTR async_event = 0;
2079 
2080   setEFlags((Bit32u) guest.rflags);
2081 
2082 #ifdef BX_SUPPORT_CS_LIMIT_DEMOTION
2083   // Handle special case of CS.LIMIT demotion (new descriptor limit is
2084   // smaller than current one)
2085   if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled > guest.sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
2086     BX_CPU_THIS_PTR iCache.flushICacheEntries();
2087 #endif
2088 
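  // Commit the validated guest segment registers, descriptor tables, LDTR/TR
  // and SYSENTER MSRs to the CPU state.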
2089   for(unsigned segreg=0; segreg<6; segreg++)
2090     BX_CPU_THIS_PTR sregs[segreg] = guest.sregs[segreg];
2091 
2092   BX_CPU_THIS_PTR gdtr.base = gdtr_base;
2093   BX_CPU_THIS_PTR gdtr.limit = gdtr_limit;
2094   BX_CPU_THIS_PTR idtr.base = idtr_base;
2095   BX_CPU_THIS_PTR idtr.limit = idtr_limit;
2096 
2097   BX_CPU_THIS_PTR ldtr = guest.ldtr;
2098   BX_CPU_THIS_PTR tr = guest.tr;
2099 
2100   BX_CPU_THIS_PTR msr.sysenter_esp_msr = guest.sysenter_esp_msr;
2101   BX_CPU_THIS_PTR msr.sysenter_eip_msr = guest.sysenter_eip_msr;
2102   BX_CPU_THIS_PTR msr.sysenter_cs_msr  = guest.sysenter_cs_msr;
2103 
2104 #if BX_SUPPORT_VMX >= 2
2105   if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_PAT_MSR) {
2106     BX_CPU_THIS_PTR msr.pat = guest.pat_msr;
2107   }
2108   vm->ple.last_pause_time = vm->ple.first_pause_time = 0;
2109 #endif
2110 
2111   //
2112   // Load Guest Non-Registers State -> VMENTER
2113   //
2114 
2115   if (vm->vmentry_ctrls & VMX_VMENTRY_CTRL1_SMM_ENTER)
2116     BX_PANIC(("VMENTER: entry to SMM is not implemented yet !"));
2117 
2118   if (VMENTRY_INJECTING_EVENT(vm->vmentry_interr_info)) {
2119     // the VMENTRY injecting event to the guest
2120     BX_CPU_THIS_PTR inhibit_mask = 0; // do not block interrupts
2121     BX_CPU_THIS_PTR debug_trap = 0;
2122     guest.activity_state = BX_ACTIVITY_STATE_ACTIVE;
2123   }
2124   else {
2125     if (guest.tmpDR6 & (1 << 12))
2126       BX_CPU_THIS_PTR debug_trap = guest.tmpDR6 & 0x0000400F;
2127     else
2128       BX_CPU_THIS_PTR debug_trap = guest.tmpDR6 & 0x00004000;
2129     if (BX_CPU_THIS_PTR debug_trap) {
2130       BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_TRAP_HIT;
2131       BX_CPU_THIS_PTR async_event = 1;
2132     }
2133 
2134     if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_STI)
2135       inhibit_interrupts(BX_INHIBIT_INTERRUPTS);
2136     else if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_MOV_SS)
2137       inhibit_interrupts(BX_INHIBIT_INTERRUPTS_BY_MOVSS);
2138     else
2139       BX_CPU_THIS_PTR inhibit_mask = 0;
2140   }
2141 
2142   if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED) {
2143     if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)
2144       mask_event(BX_EVENT_VMX_VIRTUAL_NMI);
2145     else
2146       mask_event(BX_EVENT_NMI);
2147   }
2148 
2149   if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_NMI_WINDOW_EXITING)
2150     signal_event(BX_EVENT_VMX_VIRTUAL_NMI);
2151 
2152   if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_INTERRUPT_WINDOW_VMEXIT)
2153     signal_event(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING);
2154 
2155   handleCpuContextChange();
2156 
2157 #if BX_SUPPORT_MONITOR_MWAIT
2158   BX_CPU_THIS_PTR monitor.reset_monitor();
2159 #endif
2160 
2161   BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_CONTEXT_SWITCH, 0);
2162 
2163   if (guest.activity_state) {
2164     BX_DEBUG(("VMEntry to non-active CPU state %d", guest.activity_state));
2165     enter_sleep_state(guest.activity_state);
2166   }
2167 
2168   return VMXERR_NO_ERROR;
2169 }
2170 
2171 void BX_CPU_C::VMenterInjectEvents(void)
2172 {
2173   VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
2174 
2175   if (! VMENTRY_INJECTING_EVENT(vm->vmentry_interr_info))
2176      return;
2177 
2178   /* the VMENTRY injecting event to the guest */
2179   unsigned vector = vm->vmentry_interr_info & 0xff;
2180   unsigned type = (vm->vmentry_interr_info >> 8) & 7;
2181   unsigned push_error = vm->vmentry_interr_info & (1 << 11);
2182   unsigned error_code = push_error ? vm->vmentry_excep_err_code : 0;
2183 
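  // Interruption type 7 ("other event") is used to make a Monitor Trap Flag
  // event pending after VM entry rather than delivering it through the IDT.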
2184   if (type == 7) {
2185     if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_MONITOR_TRAP_FLAG)) {
2186       signal_event(BX_EVENT_VMX_MONITOR_TRAP_FLAG);
2187       return;
2188     }
2189   }
2190 
2191   bool is_INT = false;
2192   switch(type) {
2193     case BX_EXTERNAL_INTERRUPT:
2194     case BX_HARDWARE_EXCEPTION:
2195       BX_CPU_THIS_PTR EXT = 1;
2196       break;
2197 
2198     case BX_NMI:
2199       if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)
2200         mask_event(BX_EVENT_VMX_VIRTUAL_NMI);
2201       else
2202         mask_event(BX_EVENT_NMI);
2203 
2204       BX_CPU_THIS_PTR EXT = 1;
2205       break;
2206 
2207     case BX_PRIVILEGED_SOFTWARE_INTERRUPT:
2208       BX_CPU_THIS_PTR EXT = 1;
2209       is_INT = true;
2210       break;
2211 
2212     case BX_SOFTWARE_INTERRUPT:
2213     case BX_SOFTWARE_EXCEPTION:
2214       is_INT = true;
2215       break;
2216 
2217     default:
2218       BX_PANIC(("VMENTER: unsupported event injection type %d !", type));
2219   }
2220 
2221   // keep prev_rip value/unwind in case of event delivery failure
2222   if (is_INT)
2223     RIP += vm->vmentry_instr_length;
2224 
2225   BX_DEBUG(("VMENTER: Injecting vector 0x%02x (error_code 0x%04x)", vector, error_code));
2226 
2227   if (type == BX_HARDWARE_EXCEPTION) {
2228     // record exception the same way as BX_CPU_C::exception does
2229     BX_ASSERT(vector < BX_CPU_HANDLED_EXCEPTIONS);
2230     BX_CPU_THIS_PTR last_exception_type = exceptions_info[vector].exception_type;
2231   }
2232 
2233   vm->idt_vector_info = vm->vmentry_interr_info & ~0x80000000;
2234   vm->idt_vector_error_code = error_code;
2235 
2236   interrupt(vector, type, push_error, error_code);
2237 
2238   BX_CPU_THIS_PTR last_exception_type = 0; // error resolved
2239 }
2240 
2241 Bit32u BX_CPU_C::LoadMSRs(Bit32u msr_cnt, bx_phy_address pAddr)
2242 {
2243   Bit64u msr_lo, msr_hi;
2244 
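  // Each entry of the VM-entry MSR-load area is 16 bytes: the MSR index in the
  // low 64-bit word (its upper 32 bits must be zero) and the value to load in
  // the high word. The return value is the 1-based number of the failing
  // entry, or 0 when all MSRs were loaded successfully.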
2245   for (Bit32u msr = 1; msr <= msr_cnt; msr++) {
2246     access_read_physical(pAddr,     8, &msr_lo);
2247     BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_VMX_LOAD_MSR_ACCESS, (Bit8u*)(&msr_lo));
2248     access_read_physical(pAddr + 8, 8, &msr_hi);
2249     BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr + 8, 8, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_VMX_LOAD_MSR_ACCESS, (Bit8u*)(&msr_hi));
2250     pAddr += 16; // to next MSR
2251 
2252     if (GET32H(msr_lo)) {
2253       BX_ERROR(("VMX LoadMSRs %d: broken msr index 0x" FMT_LL "x", msr, msr_lo));
2254       return msr;
2255     }
2256 
2257     Bit32u index = GET32L(msr_lo);
2258 
2259 #if BX_SUPPORT_X86_64
2260     if (index == BX_MSR_FSBASE || index == BX_MSR_GSBASE) {
2261       BX_ERROR(("VMX LoadMSRs %d: unable to restore FSBASE or GSBASE", msr));
2262       return msr;
2263     }
2264 #endif
2265 
2266     if (is_cpu_extension_supported(BX_ISA_X2APIC)) {
2267       if (is_x2apic_msr_range(index)) {
2268         BX_ERROR(("VMX LoadMSRs %d: unable to restore X2APIC range MSR %x", msr, index));
2269         return msr;
2270       }
2271     }
2272 
2273     if (! wrmsr(index, msr_hi)) {
2274       BX_ERROR(("VMX LoadMSRs %d: unable to set up MSR %x", msr, index));
2275       return msr;
2276     }
2277   }
2278 
2279   return 0;
2280 }
2281 
2282 Bit32u BX_CPU_C::StoreMSRs(Bit32u msr_cnt, bx_phy_address pAddr)
2283 {
2284   Bit64u msr_lo, msr_hi;
2285 
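  // The VM-exit MSR-store area has the same 16-byte entry layout: the MSR
  // index is read from the low word and the current MSR value is written back
  // to the high word. Returns the 1-based number of the failing entry, or 0.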
2286   for (Bit32u msr = 1; msr <= msr_cnt; msr++) {
2287     access_read_physical(pAddr, 8, &msr_lo);
2288     BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)),
2289                                           BX_READ, BX_VMX_STORE_MSR_ACCESS, (Bit8u*)(&msr_lo));
2290 
2291     if (GET32H(msr_lo)) {
2292       BX_ERROR(("VMX StoreMSRs %d: broken msr index 0x" FMT_LL "x", msr, msr_lo));
2293       return msr;
2294     }
2295 
2296     Bit32u index = GET32L(msr_lo);
2297 
2298     if (is_cpu_extension_supported(BX_ISA_X2APIC)) {
2299       if (is_x2apic_msr_range(index)) {
2300         BX_ERROR(("VMX StoreMSRs %d: unable to save X2APIC range MSR %x", msr, index));
2301         return msr;
2302       }
2303     }
2304 
2305     if (! rdmsr(index, &msr_hi)) {
2306       BX_ERROR(("VMX StoreMSRs %d: unable to read MSR %x", msr, index));
2307       return msr;
2308     }
2309 
2310     access_write_physical(pAddr + 8, 8, &msr_hi);
2311     BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr + 8, 8, MEMTYPE(resolve_memtype(pAddr)),
2312                                               BX_WRITE, BX_VMX_STORE_MSR_ACCESS, (Bit8u*)(&msr_hi));
2313 
2314     pAddr += 16; // to next MSR
2315   }
2316 
2317   return 0;
2318 }
2319 
2320 ////////////////////////////////////////////////////////////
2321 // VMexit
2322 ////////////////////////////////////////////////////////////
2323 
2324 void BX_CPU_C::VMexitSaveGuestState(void)
2325 {
2326   VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
2327   int n;
2328 
2329   VMwrite_natural(VMCS_GUEST_CR0, BX_CPU_THIS_PTR cr0.get32());
2330   VMwrite_natural(VMCS_GUEST_CR3, BX_CPU_THIS_PTR cr3);
2331   VMwrite_natural(VMCS_GUEST_CR4, BX_CPU_THIS_PTR cr4.get32());
2332 
2333 #if BX_SUPPORT_VMX >= 2
2334   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
2335     // save only if guest running in legacy PAE mode
2336     if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !long_mode()) {
2337       for(n=0; n<4; n++) {
2338         VMwrite64(VMCS_64BIT_GUEST_IA32_PDPTE0 + 2*n, BX_CPU_THIS_PTR PDPTR_CACHE.entry[n]);
2339       }
2340     }
2341   }
2342 #endif
2343 
2344   if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_SAVE_DBG_CTRLS)
2345      VMwrite_natural(VMCS_GUEST_DR7, BX_CPU_THIS_PTR dr7.get32());
2346 
2347   VMwrite_natural(VMCS_GUEST_RIP, RIP);
2348   VMwrite_natural(VMCS_GUEST_RSP, RSP);
2349   VMwrite_natural(VMCS_GUEST_RFLAGS, read_eflags());
2350 
2351 #if BX_SUPPORT_CET
2352   if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_CET)) {
2353     VMwrite_natural(VMCS_GUEST_IA32_S_CET, BX_CPU_THIS_PTR msr.ia32_cet_control[0]);
2354     VMwrite_natural(VMCS_GUEST_INTERRUPT_SSP_TABLE_ADDR, BX_CPU_THIS_PTR msr.ia32_interrupt_ssp_table);
2355     VMwrite_natural(VMCS_GUEST_SSP, SSP);
2356   }
2357 #endif
2358 
2359 #if BX_SUPPORT_PKEYS
2360   if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_PKS)) {
2361     VMwrite64(VMCS_64BIT_GUEST_IA32_PKRS, BX_CPU_THIS_PTR pkrs);
2362   }
2363 #endif
2364 
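  // Write the guest segment registers back to the VMCS; the access-rights
  // value is repacked with bit 16 signalling an unusable register.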
2365   for (n=0; n<6; n++) {
2366      Bit32u selector = BX_CPU_THIS_PTR sregs[n].selector.value;
2367      bool invalid = !BX_CPU_THIS_PTR sregs[n].cache.valid;
2368      bx_address base = BX_CPU_THIS_PTR sregs[n].cache.u.segment.base;
2369      Bit32u limit = BX_CPU_THIS_PTR sregs[n].cache.u.segment.limit_scaled;
2370      Bit32u ar = (get_descriptor_h(&BX_CPU_THIS_PTR sregs[n].cache) & 0x00f0ff00) >> 8;
2371      ar = vmx_pack_ar_field(ar | (invalid << 16), BX_CPU_THIS_PTR vmcs_map->get_access_rights_format());
2372 
2373      VMwrite16(VMCS_16BIT_GUEST_ES_SELECTOR + 2*n, selector);
2374      VMwrite32(VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS + 2*n, ar);
2375      VMwrite_natural(VMCS_GUEST_ES_BASE + 2*n, base);
2376      VMwrite32(VMCS_32BIT_GUEST_ES_LIMIT + 2*n, limit);
2377   }
2378 
2379   // save guest LDTR
2380   Bit32u ldtr_selector = BX_CPU_THIS_PTR ldtr.selector.value;
2381   bool ldtr_invalid = !BX_CPU_THIS_PTR ldtr.cache.valid;
2382   bx_address ldtr_base = BX_CPU_THIS_PTR ldtr.cache.u.segment.base;
2383   Bit32u ldtr_limit = BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled;
2384   Bit32u ldtr_ar = (get_descriptor_h(&BX_CPU_THIS_PTR ldtr.cache) & 0x00f0ff00) >> 8;
2385   ldtr_ar = vmx_pack_ar_field(ldtr_ar | (ldtr_invalid << 16), BX_CPU_THIS_PTR vmcs_map->get_access_rights_format());
2386 
2387   VMwrite16(VMCS_16BIT_GUEST_LDTR_SELECTOR, ldtr_selector);
2388   VMwrite32(VMCS_32BIT_GUEST_LDTR_ACCESS_RIGHTS, ldtr_ar);
2389   VMwrite_natural(VMCS_GUEST_LDTR_BASE, ldtr_base);
2390   VMwrite32(VMCS_32BIT_GUEST_LDTR_LIMIT, ldtr_limit);
2391 
2392   // save guest TR
2393   Bit32u tr_selector = BX_CPU_THIS_PTR tr.selector.value;
2394   bool tr_invalid = !BX_CPU_THIS_PTR tr.cache.valid;
2395   bx_address tr_base = BX_CPU_THIS_PTR tr.cache.u.segment.base;
2396   Bit32u tr_limit = BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled;
2397   Bit32u tr_ar = (get_descriptor_h(&BX_CPU_THIS_PTR tr.cache) & 0x00f0ff00) >> 8;
2398   tr_ar = vmx_pack_ar_field(tr_ar | (tr_invalid << 16), BX_CPU_THIS_PTR vmcs_map->get_access_rights_format());
2399 
2400   VMwrite16(VMCS_16BIT_GUEST_TR_SELECTOR, tr_selector);
2401   VMwrite32(VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS, tr_ar);
2402   VMwrite_natural(VMCS_GUEST_TR_BASE, tr_base);
2403   VMwrite32(VMCS_32BIT_GUEST_TR_LIMIT, tr_limit);
2404 
2405   VMwrite_natural(VMCS_GUEST_GDTR_BASE, BX_CPU_THIS_PTR gdtr.base);
2406   VMwrite32(VMCS_32BIT_GUEST_GDTR_LIMIT, BX_CPU_THIS_PTR gdtr.limit);
2407   VMwrite_natural(VMCS_GUEST_IDTR_BASE, BX_CPU_THIS_PTR idtr.base);
2408   VMwrite32(VMCS_32BIT_GUEST_IDTR_LIMIT, BX_CPU_THIS_PTR idtr.limit);
2409 
2410   VMwrite_natural(VMCS_GUEST_IA32_SYSENTER_ESP_MSR, BX_CPU_THIS_PTR msr.sysenter_esp_msr);
2411   VMwrite_natural(VMCS_GUEST_IA32_SYSENTER_EIP_MSR, BX_CPU_THIS_PTR msr.sysenter_eip_msr);
2412   VMwrite32(VMCS_32BIT_GUEST_IA32_SYSENTER_CS_MSR, BX_CPU_THIS_PTR msr.sysenter_cs_msr);
2413 
2414 #if BX_SUPPORT_VMX >= 2
2415   if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_PAT_MSR)
2416     VMwrite64(VMCS_64BIT_GUEST_IA32_PAT, BX_CPU_THIS_PTR msr.pat.u64);
2417 #if BX_SUPPORT_X86_64
2418   if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_EFER_MSR)
2419     VMwrite64(VMCS_64BIT_GUEST_IA32_EFER, BX_CPU_THIS_PTR efer.get32());
2420 #endif
2421 #endif
2422 
2423   Bit32u tmpDR6 = BX_CPU_THIS_PTR debug_trap & 0x0000400f;
2424   if (tmpDR6 & 0xf) tmpDR6 |= (1 << 12);
2425   VMwrite_natural(VMCS_GUEST_PENDING_DBG_EXCEPTIONS, tmpDR6);
2426 
2427   // effectively wakeup from MWAIT state on VMEXIT
2428   if (BX_CPU_THIS_PTR activity_state >= BX_VMX_LAST_ACTIVITY_STATE)
2429     VMwrite32(VMCS_32BIT_GUEST_ACTIVITY_STATE, BX_ACTIVITY_STATE_ACTIVE);
2430   else
2431     VMwrite32(VMCS_32BIT_GUEST_ACTIVITY_STATE, BX_CPU_THIS_PTR activity_state);
2432 
2433   Bit32u interruptibility_state = 0;
2434   if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS)) {
2435      if (interrupts_inhibited(BX_INHIBIT_DEBUG))
2436         interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_BY_MOV_SS;
2437      else
2438         interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_BY_STI;
2439   }
2440 
2441   if (is_masked_event(BX_EVENT_SMI))
2442     interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_SMI_BLOCKED;
2443 
2444   if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) {
2445     if (is_masked_event(BX_EVENT_VMX_VIRTUAL_NMI))
2446       interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED;
2447   }
2448   else {
2449     if (is_masked_event(BX_EVENT_NMI))
2450       interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED;
2451   }
2452 
2453   VMwrite32(VMCS_32BIT_GUEST_INTERRUPTIBILITY_STATE, interruptibility_state);
2454 
2455 #if BX_SUPPORT_VMX >= 2
2456   if (VMX_MSR_MISC & VMX_MISC_STORE_LMA_TO_X86_64_GUEST_VMENTRY_CONTROL) {
2457     // VMEXITs store the value of EFER.LMA into the "x86-64 guest" VMENTRY control
2458     // must be set if unrestricted guest is supported
2459     if (long_mode())
2460        vm->vmentry_ctrls |=  VMX_VMENTRY_CTRL1_X86_64_GUEST;
2461     else
2462        vm->vmentry_ctrls &= ~VMX_VMENTRY_CTRL1_X86_64_GUEST;
2463 
2464     VMwrite32(VMCS_32BIT_CONTROL_VMENTRY_CONTROLS, vm->vmentry_ctrls);
2465   }
2466 
2467   // Deactivate VMX preemption timer
2468   BX_CPU_THIS_PTR lapic.deactivate_vmx_preemption_timer();
2469   clear_event(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED);
2470   // Store back to VMCS
2471   if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_VMX_PREEMPTION_TIMER)
2472     VMwrite32(VMCS_32BIT_GUEST_PREEMPTION_TIMER_VALUE, BX_CPU_THIS_PTR lapic.read_vmx_preemption_timer());
2473 
2474   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY) {
2475     VMwrite16(VMCS_16BIT_GUEST_INTERRUPT_STATUS, (((Bit16u) vm->svi) << 8) | vm->rvi);
2476   }
2477 
2478   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_PML_ENABLE) {
2479     VMwrite16(VMCS_16BIT_GUEST_PML_INDEX, vm->pml_index);
2480   }
2481 #endif
2482 }
2483 
2484 void BX_CPU_C::VMexitLoadHostState(void)
2485 {
2486   VMCS_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcs.host_state;
2487   bool x86_64_host = false;
2488   BX_CPU_THIS_PTR tsc_offset = 0;
2489 
2490 #if BX_SUPPORT_X86_64
2491   Bit32u vmexit_ctrls = BX_CPU_THIS_PTR vmcs.vmexit_ctrls;
2492   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_HOST_ADDR_SPACE_SIZE) {
2493      BX_DEBUG(("VMEXIT to x86-64 host"));
2494      x86_64_host = true;
2495   }
2496 
2497 #if BX_SUPPORT_VMX >= 2
2498   // modify EFER.LMA / EFER.LME before setting CR4
2499   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_EFER_MSR) {
2500      BX_CPU_THIS_PTR efer.set32((Bit32u) host_state->efer_msr);
2501   }
2502   else
2503 #endif
2504   {
2505     if (x86_64_host)
2506        BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() |  (BX_EFER_LME_MASK | BX_EFER_LMA_MASK));
2507     else
2508        BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() & ~(BX_EFER_LME_MASK | BX_EFER_LMA_MASK));
2509   }
2510 #endif
2511 
2512   // ET, CD, NW, bits 28:19, 17, 15:6 and the VMX fixed bits are not modified (Section 19.8)
2513   host_state->cr0 = (BX_CPU_THIS_PTR cr0.get32() & VMX_KEEP_CR0_BITS) | (host_state->cr0 & ~VMX_KEEP_CR0_BITS);
2514 
2515   if (! check_CR0(host_state->cr0)) {
2516     BX_PANIC(("VMEXIT CR0 is broken !"));
2517   }
2518   if (! check_CR4(host_state->cr4)) {
2519     BX_PANIC(("VMEXIT CR4 is broken !"));
2520   }
2521 
2522   BX_CPU_THIS_PTR cr0.set32((Bit32u) host_state->cr0);
2523   BX_CPU_THIS_PTR cr4.set32((Bit32u) host_state->cr4);
2524   BX_CPU_THIS_PTR cr3 = host_state->cr3;
2525 
2526   if (! x86_64_host && BX_CPU_THIS_PTR cr4.get_PAE()) {
2527     if (! CheckPDPTR(host_state->cr3)) {
2528       BX_ERROR(("VMABORT: host PDPTRs are corrupted !"));
2529       VMabort(VMABORT_HOST_PDPTR_CORRUPTED);
2530     }
2531   }
2532 
2533   BX_CPU_THIS_PTR dr7.set32(0x00000400);
2534 
2535   BX_CPU_THIS_PTR msr.sysenter_cs_msr = host_state->sysenter_cs_msr;
2536   BX_CPU_THIS_PTR msr.sysenter_esp_msr = host_state->sysenter_esp_msr;
2537   BX_CPU_THIS_PTR msr.sysenter_eip_msr = host_state->sysenter_eip_msr;
2538 
2539 #if BX_SUPPORT_VMX >= 2
2540   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_PAT_MSR) {
2541     BX_CPU_THIS_PTR msr.pat = host_state->pat_msr;
2542   }
2543 #endif
2544 
2545   // CS selector loaded from VMCS
2546   //    valid   <= 1
2547   //    base    <= 0
2548   //    limit   <= 0xffffffff, g <= 1
2549   //    present <= 1
2550   //    dpl     <= 0
2551   //    type    <= segment, BX_CODE_EXEC_READ_ACCESSED
2552   //    d_b     <= loaded from 'host-address space size' VMEXIT control
2553   //    l       <= loaded from 'host-address space size' VMEXIT control
2554 
2555   parse_selector(host_state->segreg_selector[BX_SEG_REG_CS],
2556                &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
2557 
2558   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid    = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
2559   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p        = 1;
2560   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl      = 0;
2561   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment  = 1;  /* data/code segment */
2562   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type     = BX_CODE_EXEC_READ_ACCESSED;
2563 
2564   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0;
2565   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xffffffff;
2566   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0;
2567   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g   = 1; /* page granular */
2568   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = !x86_64_host;
2569 #if BX_SUPPORT_X86_64
2570   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l   =  x86_64_host;
2571 #endif
2572 
2573   // DATA selector loaded from VMCS
2574   //    valid   <= if selector is not all-zero
2575   //    base    <= 0
2576   //    limit   <= 0xffffffff, g <= 1
2577   //    present <= 1
2578   //    dpl     <= 0
2579   //    type    <= segment, BX_DATA_READ_WRITE_ACCESSED
2580   //    d_b     <= 1
2581   //    l       <= 0
2582 
2583   for (unsigned segreg = 0; segreg < 6; segreg++)
2584   {
2585     if (segreg == BX_SEG_REG_CS) continue;
2586 
2587     parse_selector(host_state->segreg_selector[segreg],
2588                &BX_CPU_THIS_PTR sregs[segreg].selector);
2589 
2590     if (! host_state->segreg_selector[segreg]) {
2591        BX_CPU_THIS_PTR sregs[segreg].cache.valid    = 0;
2592     }
2593     else {
2594        BX_CPU_THIS_PTR sregs[segreg].cache.valid    = SegValidCache;
2595        BX_CPU_THIS_PTR sregs[segreg].cache.p        = 1;
2596        BX_CPU_THIS_PTR sregs[segreg].cache.dpl      = 0;
2597        BX_CPU_THIS_PTR sregs[segreg].cache.segment  = 1;  /* data/code segment */
2598        BX_CPU_THIS_PTR sregs[segreg].cache.type     = BX_DATA_READ_WRITE_ACCESSED;
2599        BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.base         = 0;
2600        BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.limit_scaled = 0xffffffff;
2601        BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.avl = 0;
2602        BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.g   = 1; /* page granular */
2603        BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.d_b = 1;
2604 #if BX_SUPPORT_X86_64
2605        BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.l   = 0;
2606 #endif
2607     }
2608   }
2609 
2610   // SS.DPL always clear
2611   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 0;
2612 
2613   if (x86_64_host || BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.valid)
2614     BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.u.segment.base = host_state->fs_base;
2615 
2616   if (x86_64_host || BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.valid)
2617     BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.u.segment.base = host_state->gs_base;
2618 
2619   // TR selector loaded from VMCS
2620   parse_selector(host_state->tr_selector, &BX_CPU_THIS_PTR tr.selector);
2621 
2622   BX_CPU_THIS_PTR tr.cache.valid    = SegValidCache; /* valid */
2623   BX_CPU_THIS_PTR tr.cache.p        = 1; /* present */
2624   BX_CPU_THIS_PTR tr.cache.dpl      = 0; /* field not used */
2625   BX_CPU_THIS_PTR tr.cache.segment  = 0; /* system segment */
2626   BX_CPU_THIS_PTR tr.cache.type     = BX_SYS_SEGMENT_BUSY_386_TSS;
2627   BX_CPU_THIS_PTR tr.cache.u.segment.base         = host_state->tr_base;
2628   BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled = 0x67;
2629   BX_CPU_THIS_PTR tr.cache.u.segment.avl = 0;
2630   BX_CPU_THIS_PTR tr.cache.u.segment.g   = 0; /* byte granular */
2631 
2632   // unusable LDTR
2633   BX_CPU_THIS_PTR ldtr.selector.value = 0x0000;
2634   BX_CPU_THIS_PTR ldtr.selector.index = 0x0000;
2635   BX_CPU_THIS_PTR ldtr.selector.ti    = 0;
2636   BX_CPU_THIS_PTR ldtr.selector.rpl   = 0;
2637   BX_CPU_THIS_PTR ldtr.cache.valid    = 0; /* invalid */
2638 
2639   BX_CPU_THIS_PTR gdtr.base = host_state->gdtr_base;
2640   BX_CPU_THIS_PTR gdtr.limit = 0xFFFF;
2641 
2642   BX_CPU_THIS_PTR idtr.base = host_state->idtr_base;
2643   BX_CPU_THIS_PTR idtr.limit = 0xFFFF;
2644 
2645   RIP = BX_CPU_THIS_PTR prev_rip = host_state->rip;
2646   RSP = host_state->rsp;
2647 
2648 #if BX_SUPPORT_CET
2649   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_HOST_CET_STATE) {
2650     SSP = host_state->ssp;
2651     BX_CPU_THIS_PTR msr.ia32_interrupt_ssp_table = host_state->interrupt_ssp_table_address;
2652     BX_CPU_THIS_PTR msr.ia32_cet_control[0] = host_state->msr_ia32_s_cet;
2653   }
2654 #endif
2655 
2656 #if BX_SUPPORT_PKEYS
2657   if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_HOST_PKRS) {
2658     set_PKeys(BX_CPU_THIS_PTR pkru, host_state->pkrs);
2659   }
2660 #endif
2661 
2662   BX_CPU_THIS_PTR inhibit_mask = 0;
2663   BX_CPU_THIS_PTR debug_trap = 0;
2664 
2665   // set flags directly, avoid setEFlags side effects
2666   BX_CPU_THIS_PTR eflags = 0x2; // Bit1 is always set
2667   // Update lazy flags state
2668   clearEFlagsOSZAPC();
2669 
2670   BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
2671 
2672   handleCpuContextChange();
2673 
2674 #if BX_SUPPORT_MONITOR_MWAIT
2675   BX_CPU_THIS_PTR monitor.reset_monitor();
2676 #endif
2677 
2678   BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_CONTEXT_SWITCH, 0);
2679 }
2680 
2681 void BX_CPU_C::VMexit(Bit32u reason, Bit64u qualification)
2682 {
2683   VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
2684 
2685   if (!BX_CPU_THIS_PTR in_vmx || !BX_CPU_THIS_PTR in_vmx_guest) {
2686     if ((reason & 0x80000000) == 0)
2687       BX_PANIC(("PANIC: VMEXIT not in VMX guest mode !"));
2688   }
2689 
2690   BX_INSTR_VMEXIT(BX_CPU_ID, reason, qualification);
2691 
2692   //
2693   // STEP 0: Update VMEXIT reason
2694   //
2695 
2696   VMwrite32(VMCS_32BIT_VMEXIT_REASON, reason);
2697   VMwrite_natural(VMCS_VMEXIT_QUALIFICATION, qualification);
2698 
2699   // clipping with 0xf not really necessary but keep it for safety
2700   VMwrite32(VMCS_32BIT_VMEXIT_INSTRUCTION_LENGTH, (RIP-BX_CPU_THIS_PTR prev_rip) & 0xf);
2701 
2702   reason &= 0xffff; /* keep only basic VMEXIT reason */
2703 
2704   if (reason >= VMX_VMEXIT_LAST_REASON)
2705     BX_PANIC(("PANIC: broken VMEXIT reason %d", reason));
2706   else
2707     BX_DEBUG(("VMEXIT reason = %d (%s) qualification=0x" FMT_LL "x", reason, VMX_vmexit_reason_name[reason], qualification));
2708 
2709   if (reason != VMX_VMEXIT_EXCEPTION_NMI && reason != VMX_VMEXIT_EXTERNAL_INTERRUPT) {
2710     VMwrite32(VMCS_32BIT_VMEXIT_INTERRUPTION_INFO, 0);
2711   }
2712 
2713   if (BX_CPU_THIS_PTR in_event) {
2714     VMwrite32(VMCS_32BIT_IDT_VECTORING_INFO, vm->idt_vector_info | 0x80000000);
2715     VMwrite32(VMCS_32BIT_IDT_VECTORING_ERR_CODE, vm->idt_vector_error_code);
2716     BX_CPU_THIS_PTR in_event = 0;
2717   }
2718   else {
2719     VMwrite32(VMCS_32BIT_IDT_VECTORING_INFO, 0);
2720   }
2721 
2722   BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
2723 
2724   // VMEXITs are FAULT-like: restore RIP/RSP to value before VMEXIT occurred
2725   if (! IS_TRAP_LIKE_VMEXIT(reason)) {
2726     RIP = BX_CPU_THIS_PTR prev_rip;
2727     if (BX_CPU_THIS_PTR speculative_rsp) {
2728       RSP = BX_CPU_THIS_PTR prev_rsp;
2729 #if BX_SUPPORT_CET
2730       SSP = BX_CPU_THIS_PTR prev_ssp;
2731 #endif
2732     }
2733   }
2734   BX_CPU_THIS_PTR speculative_rsp = 0;
2735 
2736   //
2737   // STEP 1: Saving Guest State to VMCS
2738   //
2739   if (reason != VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE && reason != VMX_VMEXIT_VMENTRY_FAILURE_MSR) {
2740     // clear VMENTRY interruption info field
2741     VMwrite32(VMCS_32BIT_CONTROL_VMENTRY_INTERRUPTION_INFO, vm->vmentry_interr_info & ~0x80000000);
2742 
2743     VMexitSaveGuestState();
2744 
2745     Bit32u msr = StoreMSRs(vm->vmexit_msr_store_cnt, vm->vmexit_msr_store_addr);
2746     if (msr) {
2747       BX_ERROR(("VMABORT: Error when saving guest MSR number %d", msr));
2748       VMabort(VMABORT_SAVING_GUEST_MSRS_FAILURE);
2749     }
2750   }
2751 
2752   BX_CPU_THIS_PTR in_vmx_guest = 0;
2753 
2754   // entering VMX root mode: clear possibly pending guest VMX events
2755   clear_event(BX_EVENT_VMX_VTPR_UPDATE |
2756               BX_EVENT_VMX_VEOI_UPDATE |
2757               BX_EVENT_VMX_VIRTUAL_APIC_WRITE |
2758               BX_EVENT_VMX_MONITOR_TRAP_FLAG |
2759               BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING |
2760               BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED |
2761               BX_EVENT_VMX_VIRTUAL_NMI |
2762               BX_EVENT_PENDING_VMX_VIRTUAL_INTR);
2763 
2764   //
2765   // STEP 2: Load Host State
2766   //
2767   VMexitLoadHostState();
2768 
2769   //
2770   // STEP 3: Load Host MSR registers
2771   //
2772 
2773   Bit32u msr = LoadMSRs(vm->vmexit_msr_load_cnt, vm->vmexit_msr_load_addr);
2774   if (msr) {
2775     BX_ERROR(("VMABORT: Error when loading host MSR number %d", msr));
2776     VMabort(VMABORT_LOADING_HOST_MSRS);
2777   }
2778 
2779   //
2780   // STEP 4: Go back to VMX host
2781   //
2782 
2783   mask_event(BX_EVENT_INIT); // INIT is disabled in VMX root mode
2784 
2785   BX_CPU_THIS_PTR EXT = 0;
2786   BX_CPU_THIS_PTR last_exception_type = 0;
2787 
2788 #if BX_DEBUGGER
2789   if (BX_CPU_THIS_PTR vmexit_break) {
2790     BX_CPU_THIS_PTR stop_reason = STOP_VMEXIT_BREAK_POINT;
2791     bx_debug_break(); // trap into debugger
2792   }
2793 #endif
2794 
2795   if (! IS_TRAP_LIKE_VMEXIT(reason)) {
2796     longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
2797   }
2798 }
2799 
2800 #endif // BX_SUPPORT_VMX
2801 
2802 ////////////////////////////////////////////////////////////
2803 // VMX instructions
2804 ////////////////////////////////////////////////////////////
2805 
2806 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMXON(bxInstruction_c *i)
2807 {
2808 #if BX_SUPPORT_VMX
2809   if (! BX_CPU_THIS_PTR cr4.get_VMXE() || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
2810     exception(BX_UD_EXCEPTION, 0);
2811 
2812   if (! BX_CPU_THIS_PTR in_vmx) {
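    // VMXON outside VMX operation requires CPL=0, CR0.NE=1, CR0.PE=1, A20 enabled and
    // an IA32_FEATURE_CONTROL MSR with both the lock bit and the VMX-enable bit set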
2813     if (CPL != 0 || ! BX_CPU_THIS_PTR cr0.get_NE() ||
2814         ! (BX_CPU_THIS_PTR cr0.get_PE()) || BX_GET_ENABLE_A20() == 0 ||
2815         ! (BX_CPU_THIS_PTR msr.ia32_feature_ctrl & BX_IA32_FEATURE_CONTROL_LOCK_BIT) ||
2816         ! (BX_CPU_THIS_PTR msr.ia32_feature_ctrl & BX_IA32_FEATURE_CONTROL_VMX_ENABLE_BIT))
2817     {
2818       BX_ERROR(("#GP: VMXON is not allowed !"));
2819       exception(BX_GP_EXCEPTION, 0);
2820     }
2821 
2822     bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
2823     Bit64u pAddr = read_virtual_qword(i->seg(), eaddr); // keep 64-bit
2824     if (! IsValidPageAlignedPhyAddr(pAddr)) {
2825       BX_ERROR(("VMXON: invalid or not page aligned physical address !"));
2826       VMfailInvalid();
2827       BX_NEXT_INSTR(i);
2828     }
2829 
2830     // not allowed to be shadow VMCS
2831     Bit32u revision = VMXReadRevisionID((bx_phy_address) pAddr);
2832     if (revision != BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()) {
2833       BX_ERROR(("VMXON: not expected (%d != %d) VMCS revision id !", revision, BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()));
2834       VMfailInvalid();
2835       BX_NEXT_INSTR(i);
2836     }
2837 
2838     BX_CPU_THIS_PTR vmcsptr = BX_INVALID_VMCSPTR;
2839     BX_CPU_THIS_PTR vmcshostptr = 0;
2840     BX_CPU_THIS_PTR vmxonptr = pAddr;
2841     BX_CPU_THIS_PTR in_vmx = 1;
2842     mask_event(BX_EVENT_INIT); // INIT is disabled in VMX root mode
2843     // block and disable A20M;
2844 
2845 #if BX_SUPPORT_MONITOR_MWAIT
2846     BX_CPU_THIS_PTR monitor.reset_monitor();
2847 #endif
2848 
2849     VMsucceed();
2850   }
2851   else if (BX_CPU_THIS_PTR in_vmx_guest) { // in VMX non-root operation
2852     VMexit_Instruction(i, VMX_VMEXIT_VMXON);
2853   }
2854   else {
2855     // in VMX root operation mode
2856     if (CPL != 0) {
2857       BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
2858       exception(BX_GP_EXCEPTION, 0);
2859     }
2860 
2861     VMfail(VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
2862   }
2863 #endif
2864 
2865   BX_NEXT_INSTR(i);
2866 }
2867 
2868 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMXOFF(bxInstruction_c *i)
2869 {
2870 #if BX_SUPPORT_VMX
2871   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
2872     exception(BX_UD_EXCEPTION, 0);
2873 
2874   if (BX_CPU_THIS_PTR in_vmx_guest) {
2875     VMexit(VMX_VMEXIT_VMXOFF, 0);
2876   }
2877 
2878   if (CPL != 0) {
2879     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
2880     exception(BX_GP_EXCEPTION, 0);
2881   }
2882 
2883 /*
2884         if dual-monitor treatment of SMIs and SMM is active
2885                 THEN VMfail(VMXERR_VMXOFF_WITH_CONFIGURED_SMM_MONITOR);
2886         else
2887 */
2888   {
2889     BX_CPU_THIS_PTR vmxonptr = BX_INVALID_VMCSPTR;
2890     BX_CPU_THIS_PTR in_vmx = 0;  // leave VMX operation mode
2891     unmask_event(BX_EVENT_INIT);
2892      // unblock and enable A20M;
2893 #if BX_SUPPORT_MONITOR_MWAIT
2894     BX_CPU_THIS_PTR monitor.reset_monitor();
2895 #endif
2896     VMsucceed();
2897   }
2898 #endif
2899 
2900   BX_NEXT_INSTR(i);
2901 }
2902 
2903 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMCALL(bxInstruction_c *i)
2904 {
2905 #if BX_SUPPORT_VMX
2906   if (! BX_CPU_THIS_PTR in_vmx)
2907     exception(BX_UD_EXCEPTION, 0);
2908 
2909   if (BX_CPU_THIS_PTR in_vmx_guest) {
2910     VMexit(VMX_VMEXIT_VMCALL, 0);
2911   }
2912 
2913   if (BX_CPU_THIS_PTR get_VM() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
2914     exception(BX_UD_EXCEPTION, 0);
2915 
2916   if (CPL != 0) {
2917     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
2918     exception(BX_GP_EXCEPTION, 0);
2919   }
2920 
2921   if (BX_CPU_THIS_PTR in_smm /*||
2922         (the logical processor does not support the dual-monitor treatment of SMIs and SMM) ||
2923         (the valid bit in the IA32_SMM_MONITOR_CTL MSR is clear)*/)
2924   {
2925     VMfail(VMXERR_VMCALL_IN_VMX_ROOT_OPERATION);
2926     BX_NEXT_TRACE(i);
2927   }
2928 /*
2929         if dual-monitor treatment of SMIs and BX_CPU_THIS_PTR in_smm
2930                 THEN perform an SMM VMexit (see Section 24.16.2
2931                      of the Intel(R) 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
2932 */
2933   if (BX_CPU_THIS_PTR vmcsptr == BX_INVALID_VMCSPTR) {
2934     BX_ERROR(("VMFAIL: VMCALL with invalid VMCS ptr"));
2935     VMfailInvalid();
2936     BX_NEXT_TRACE(i);
2937   }
2938 
2939   Bit32u launch_state = VMread32(VMCS_LAUNCH_STATE_FIELD_ENCODING);
2940   if (launch_state != VMCS_STATE_CLEAR) {
2941     BX_ERROR(("VMFAIL: VMCALL with launched VMCS"));
2942     VMfail(VMXERR_VMCALL_NON_CLEAR_VMCS);
2943     BX_NEXT_TRACE(i);
2944   }
2945 
2946   BX_PANIC(("VMCALL: not implemented yet"));
2947 /*
2948   if VM-exit control fields are not valid (see Section 24.16.6.1 of the Intel(R) 64 and IA-32 Architectures Software Developer's Manual, Volume 3B)
2949       THEN VMfail(VMXERR_VMCALL_INVALID_VMEXIT_FIELD);
2950    else
2951       enter SMM;
2952       read revision identifier in MSEG;
2953       if revision identifier does not match that supported by processor
2954       THEN
2955           leave SMM;
2956           VMfailValid(VMXERR_VMCALL_INVALID_MSEG_REVISION_ID);
2957       else
2958           read SMM-monitor features field in MSEG (see Section 24.16.6.2,
2959           in the Intel(R) 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
2960           if features field is invalid
2961           THEN
2962               leave SMM;
2963               VMfailValid(VMXERR_VMCALL_WITH_INVALID_SMM_MONITOR_FEATURES);
2964           else activate dual-monitor treatment of SMIs and SMM (see Section 24.16.6
2965               in the Intel(R) 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
2966           FI;
2967       FI;
2968   FI;
2969 */
2970 #endif
2971 
2972   BX_NEXT_TRACE(i);
2973 }
2974 
2975 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLAUNCH(bxInstruction_c *i)
2976 {
2977 #if BX_SUPPORT_VMX
2978   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
2979     exception(BX_UD_EXCEPTION, 0);
2980 
2981   unsigned vmlaunch = 0;
2982   if ((i->getIaOpcode() == BX_IA_VMLAUNCH)) {
2983     BX_DEBUG(("VMLAUNCH VMCS ptr: 0x" FMT_ADDRX64, BX_CPU_THIS_PTR vmcsptr));
2984     vmlaunch = 1;
2985   }
2986   else {
2987     BX_DEBUG(("VMRESUME VMCS ptr: 0x" FMT_ADDRX64, BX_CPU_THIS_PTR vmcsptr));
2988   }
2989 
2990   if (BX_CPU_THIS_PTR in_vmx_guest) {
2991     VMexit(vmlaunch ? VMX_VMEXIT_VMLAUNCH : VMX_VMEXIT_VMRESUME, 0);
2992   }
2993 
2994   if (CPL != 0) {
2995     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
2996     exception(BX_GP_EXCEPTION, 0);
2997   }
2998 
2999   if (BX_CPU_THIS_PTR vmcsptr == BX_INVALID_VMCSPTR) {
3000     BX_ERROR(("VMFAIL: VMLAUNCH with invalid VMCS ptr !"));
3001     VMfailInvalid();
3002     BX_NEXT_TRACE(i);
3003   }
3004 
3005   if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS_BY_MOVSS)) {
3006     BX_ERROR(("VMFAIL: VMLAUNCH with interrupts blocked by MOV_SS !"));
3007     VMfail(VMXERR_VMENTRY_MOV_SS_BLOCKING);
3008     BX_NEXT_TRACE(i);
3009   }
3010 
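  // VMLAUNCH requires a clear (freshly VMCLEARed) VMCS, while VMRESUME requires an
  // already launched one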
3011   Bit32u launch_state = VMread32(VMCS_LAUNCH_STATE_FIELD_ENCODING);
3012   if (vmlaunch) {
3013     if (launch_state != VMCS_STATE_CLEAR) {
3014        BX_ERROR(("VMFAIL: VMLAUNCH with non-clear VMCS!"));
3015        VMfail(VMXERR_VMLAUNCH_NON_CLEAR_VMCS);
3016        BX_NEXT_TRACE(i);
3017     }
3018   }
3019   else {
3020     if (launch_state != VMCS_STATE_LAUNCHED) {
3021        BX_ERROR(("VMFAIL: VMRESUME with non-launched VMCS!"));
3022        VMfail(VMXERR_VMRESUME_NON_LAUNCHED_VMCS);
3023        BX_NEXT_TRACE(i);
3024     }
3025   }
3026 
3027   ///////////////////////////////////////////////////////
3028   // STEP 1: Load and Check VM-Execution Control Fields
3029   // STEP 2: Load and Check VM-Exit Control Fields
3030   // STEP 3: Load and Check VM-Entry Control Fields
3031   ///////////////////////////////////////////////////////
3032 
3033   VMX_error_code error = VMenterLoadCheckVmControls();
3034   if (error != VMXERR_NO_ERROR) {
3035     VMfail(error);
3036     BX_NEXT_TRACE(i);
3037   }
3038 
3039   ///////////////////////////////////////////////////////
3040   // STEP 4: Load and Check Host State
3041   ///////////////////////////////////////////////////////
3042 
3043   error = VMenterLoadCheckHostState();
3044   if (error != VMXERR_NO_ERROR) {
3045     VMfail(error);
3046     BX_NEXT_TRACE(i);
3047   }
3048 
3049   ///////////////////////////////////////////////////////
3050   // STEP 5: Load and Check Guest State
3051   ///////////////////////////////////////////////////////
3052 
3053   Bit64u qualification = VMENTER_ERR_NO_ERROR;
3054   Bit32u state_load_error = VMenterLoadCheckGuestState(&qualification);
3055   if (state_load_error) {
3056     BX_ERROR(("VMEXIT: Guest State Checks Failed"));
3057     VMexit(VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE | (1 << 31), qualification);
3058   }
3059 
3060   Bit32u msr = LoadMSRs(BX_CPU_THIS_PTR vmcs.vmentry_msr_load_cnt, BX_CPU_THIS_PTR vmcs.vmentry_msr_load_addr);
3061   if (msr) {
3062     BX_ERROR(("VMEXIT: Error when loading guest MSR number %d", msr));
3063     VMexit(VMX_VMEXIT_VMENTRY_FAILURE_MSR | (1 << 31), msr);
3064   }
3065 
3066   ///////////////////////////////////////////////////////
3067   // STEP 6: Update VMCS 'launched' state
3068   ///////////////////////////////////////////////////////
3069 
3070   if (vmlaunch) VMwrite32(VMCS_LAUNCH_STATE_FIELD_ENCODING, VMCS_STATE_LAUNCHED);
3071 
3072 /*
3073    Check settings of VMX controls and host-state area;
3074    if invalid settings
3075    THEN VMfailValid(VM entry with invalid VMX-control field(s)) or
3076         VMfailValid(VM entry with invalid host-state field(s)) or
3077         VMfailValid(VM entry with invalid executive-VMCS pointer)) or
3078         VMfailValid(VM entry with non-launched executive VMCS) or
3079         VMfailValid(VM entry with executive-VMCS pointer not VMXON pointer)
3080         VMfailValid(VM entry with invalid VM-execution control fields in executive VMCS)
3081    (as appropriate);
3082    else
3083         Attempt to load guest state and PDPTRs as appropriate;
3084         clear address-range monitoring;
3085         if failure in checking guest state or PDPTRs
3086                 THEN VM entry fails (see Section 22.7, in the Intel(R) 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
3087         else
3088                 Attempt to load MSRs from VM-entry MSR-load area;
3089                 if failure
3090                         THEN VM entry fails (see Section 22.7, in the Intel(R) 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
3091                 else {
3092                         if VMLAUNCH
3093                         	THEN launch state of VMCS <== "launched";
3094                                 if in SMM and "entry to SMM" VM-entry control is 0
3095                                 THEN
3096                                 	if "deactivate dual-monitor treatment" VM-entry control is 0
3097                                         	THEN SMM-transfer VMCS pointer <== current-VMCS pointer;
3098                                         FI;
3099                                         if executive-VMCS pointer is VMX pointer
3100                                         	THEN current-VMCS pointer <== VMCS-link pointer;
3101                                         else current-VMCS pointer <== executive-VMCS pointer;
3102                                 FI;
3103                                 leave SMM;
3104                         FI;
3105                         VMsucceed();
3106                 }
3107          FI;
3108    FI;
3109 */
3110 
3111   BX_CPU_THIS_PTR in_vmx_guest = 1;
3112 
3113   unmask_event(BX_EVENT_INIT);
3114 
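  // when the "use TSC offsetting" control is set, this offset is added to the value
  // the guest reads from the TSC while in VMX non-root operation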
3115   if (VMEXIT(VMX_VM_EXEC_CTRL2_TSC_OFFSET))
3116     BX_CPU_THIS_PTR tsc_offset = VMread64(VMCS_64BIT_CONTROL_TSC_OFFSET);
3117   else
3118     BX_CPU_THIS_PTR tsc_offset = 0;
3119 
3120 #if BX_SUPPORT_VMX >= 2
3121   if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VMX_PREEMPTION_TIMER_VMEXIT)) {
3122     Bit32u timer_value = VMread32(VMCS_32BIT_GUEST_PREEMPTION_TIMER_VALUE);
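    // a timer value of 0 at VMENTRY causes an immediate preemption timer VMEXIT
    // before the guest executes any instruction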
3123     if (timer_value == 0) {
3124       signal_event(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED);
3125     }
3126     else {
3127       // activate VMX preemption timer
3128       BX_DEBUG(("VMX preemption timer active"));
3129       BX_CPU_THIS_PTR lapic.set_vmx_preemption_timer(timer_value);
3130     }
3131   }
3132 #endif
3133 
3134   ///////////////////////////////////////////////////////
3135   // STEP 7: Inject events to the guest
3136   ///////////////////////////////////////////////////////
3137 
3138   VMenterInjectEvents();
3139 
3140 #if BX_SUPPORT_X86_64
3141   // - When virtual-interrupt delivery is enabled this causes PPR virtualization
3142   //   followed by virtual interrupt evaluation
3143   // - When the TPR shadow is used together with Virtualize APIC Accesses this
3144   //   causes a TPR threshold check
3145   // - When Virtualize APIC Accesses is disabled the code still passes through the
3146   //   TPR threshold check but no VMExit can occur (otherwise VMEntry would already
3147   //   have failed its consistency checks)
3148   if (VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW)) {
3149     VMX_TPR_Virtualization();
3150   }
3151 #endif
3152 
3153 #endif // BX_SUPPORT_VMX
3154 
3155   BX_NEXT_TRACE(i);
3156 }
3157 
3158 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMPTRLD(bxInstruction_c *i)
3159 {
3160 #if BX_SUPPORT_VMX
3161   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3162     exception(BX_UD_EXCEPTION, 0);
3163 
3164   if (BX_CPU_THIS_PTR in_vmx_guest) {
3165     VMexit_Instruction(i, VMX_VMEXIT_VMPTRLD);
3166   }
3167 
3168   if (CPL != 0) {
3169     BX_ERROR(("VMPTRLD with CPL!=0 will cause #GP(0)"));
3170     exception(BX_GP_EXCEPTION, 0);
3171   }
3172 
3173   bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
3174   Bit64u pAddr = read_virtual_qword(i->seg(), eaddr); // keep 64-bit
3175   if (! IsValidPageAlignedPhyAddr(pAddr)) {
3176     BX_ERROR(("VMFAIL: invalid or not page aligned physical address !"));
3177     VMfail(VMXERR_VMPTRLD_INVALID_PHYSICAL_ADDRESS);
3178     BX_NEXT_INSTR(i);
3179   }
3180 
3181   if (pAddr == BX_CPU_THIS_PTR vmxonptr) {
3182     BX_ERROR(("VMFAIL: VMPTRLD with VMXON ptr !"));
3183     VMfail(VMXERR_VMPTRLD_WITH_VMXON_PTR);
3184   }
3185   else {
3186     Bit32u revision = VMXReadRevisionID((bx_phy_address) pAddr);
3187 
3188     if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_VMCS_SHADOWING))
3189       revision &= ~BX_VMCS_SHADOW_BIT_MASK; // allowed to be shadow VMCS
3190 
3191     if (revision != BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()) {
3192        BX_ERROR(("VMPTRLD: not expected (%d != %d) VMCS revision id !", revision, BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()));
3193        VMfail(VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
3194     }
3195     else {
3196        set_VMCSPTR(pAddr);
3197        VMsucceed();
3198     }
3199   }
3200 #endif
3201 
3202   BX_NEXT_INSTR(i);
3203 }
3204 
3205 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMPTRST(bxInstruction_c *i)
3206 {
3207 #if BX_SUPPORT_VMX
3208   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3209     exception(BX_UD_EXCEPTION, 0);
3210 
3211   if (BX_CPU_THIS_PTR in_vmx_guest) {
3212     VMexit_Instruction(i, VMX_VMEXIT_VMPTRST);
3213   }
3214 
3215   if (CPL != 0) {
3216     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3217     exception(BX_GP_EXCEPTION, 0);
3218   }
3219 
3220   bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
3221   write_virtual_qword(i->seg(), eaddr, BX_CPU_THIS_PTR vmcsptr);
3222   VMsucceed();
3223 #endif
3224 
3225   BX_NEXT_INSTR(i);
3226 }
3227 
3228 #if BX_SUPPORT_VMX
3229 
3230 Bit64u BX_CPP_AttrRegparmN(1) BX_CPU_C::vmread(unsigned encoding)
3231 {
3232   unsigned width = VMCS_FIELD_WIDTH(encoding);
3233   Bit64u field_64;
3234 
3235   if(width == VMCS_FIELD_WIDTH_16BIT) {
3236     field_64 = VMread16(encoding);
3237   }
3238   else if(width == VMCS_FIELD_WIDTH_32BIT) {
3239     // real hardware stores the access rights fields in packed format
3240     if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
3241       field_64 = vmx_unpack_ar_field(VMread32(encoding), BX_CPU_THIS_PTR vmcs_map->get_access_rights_format());
3242     else
3243       field_64 = VMread32(encoding);
3244   }
3245   else if(width == VMCS_FIELD_WIDTH_64BIT) {
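    // the "high" companion encoding of a 64-bit field accesses only its upper 32 bits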
3246     if (IS_VMCS_FIELD_HI(encoding))
3247       field_64 = VMread32(encoding);
3248     else
3249       field_64 = VMread64(encoding);
3250   }
3251   else {
3252     field_64 = VMread_natural(encoding);
3253   }
3254 
3255   return field_64;
3256 }
3257 
3258 void BX_CPP_AttrRegparmN(2) BX_CPU_C::vmwrite(unsigned encoding, Bit64u val_64)
3259 {
3260   unsigned width = VMCS_FIELD_WIDTH(encoding);
3261   Bit32u val_32 = GET32L(val_64);
3262 
3263   if(width == VMCS_FIELD_WIDTH_16BIT) {
3264     VMwrite16(encoding, val_32 & 0xffff);
3265   }
3266   else if(width == VMCS_FIELD_WIDTH_32BIT) {
3267     // real hardware stores the access rights fields in packed format
3268     if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
3269       if (BX_CPU_THIS_PTR vmcs_map->get_access_rights_format() == VMCS_AR_PACK)
3270         VMwrite16(encoding, (Bit16u) vmx_pack_ar_field(val_32, VMCS_AR_PACK));
3271       else
3272         VMwrite32(encoding, vmx_pack_ar_field(val_32, BX_CPU_THIS_PTR vmcs_map->get_access_rights_format()));
3273     else
3274       VMwrite32(encoding, val_32);
3275   }
3276   else if(width == VMCS_FIELD_WIDTH_64BIT) {
3277     if (IS_VMCS_FIELD_HI(encoding))
3278       VMwrite32(encoding, val_32);
3279     else
3280       VMwrite64(encoding, val_64);
3281   }
3282   else {
3283     VMwrite_natural(encoding, (bx_address) val_64);
3284   }
3285 }
3286 
3287 #if BX_SUPPORT_VMX >= 2
3288 
3289 Bit64u BX_CPP_AttrRegparmN(1) BX_CPU_C::vmread_shadow(unsigned encoding)
3290 {
3291   unsigned width = VMCS_FIELD_WIDTH(encoding);
3292   Bit64u field_64;
3293 
3294   if(width == VMCS_FIELD_WIDTH_16BIT) {
3295     field_64 = VMread16_Shadow(encoding);
3296   }
3297   else if(width == VMCS_FIELD_WIDTH_32BIT) {
3298     // real hardware stores the access rights fields in packed format
3299     if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
3300       field_64 = vmx_unpack_ar_field(VMread32_Shadow(encoding), BX_CPU_THIS_PTR vmcs_map->get_access_rights_format());
3301     else
3302       field_64 = VMread32_Shadow(encoding);
3303   }
3304   else if(width == VMCS_FIELD_WIDTH_64BIT) {
3305     if (IS_VMCS_FIELD_HI(encoding))
3306       field_64 = VMread32_Shadow(encoding);
3307     else
3308       field_64 = VMread64_Shadow(encoding);
3309   }
3310   else {
3311     field_64 = VMread64_Shadow(encoding);
3312   }
3313 
3314   return field_64;
3315 }
3316 
3317 void BX_CPP_AttrRegparmN(2) BX_CPU_C::vmwrite_shadow(unsigned encoding, Bit64u val_64)
3318 {
3319   unsigned width = VMCS_FIELD_WIDTH(encoding);
3320   Bit32u val_32 = GET32L(val_64);
3321 
3322   if(width == VMCS_FIELD_WIDTH_16BIT) {
3323     VMwrite16_Shadow(encoding, val_32 & 0xffff);
3324   }
3325   else if(width == VMCS_FIELD_WIDTH_32BIT) {
3326     // real hardware stores the access rights fields in packed format
3327     if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
3328       if (BX_CPU_THIS_PTR vmcs_map->get_access_rights_format() == VMCS_AR_PACK)
3329         VMwrite16_Shadow(encoding, (Bit16u) vmx_pack_ar_field(val_32, VMCS_AR_PACK));
3330       else
3331         VMwrite32_Shadow(encoding, vmx_pack_ar_field(val_32, BX_CPU_THIS_PTR vmcs_map->get_access_rights_format()));
3332     else
3333       VMwrite32_Shadow(encoding, val_32);
3334   }
3335   else if(width == VMCS_FIELD_WIDTH_64BIT) {
3336     if (IS_VMCS_FIELD_HI(encoding))
3337       VMwrite32_Shadow(encoding, val_32);
3338     else
3339       VMwrite64_Shadow(encoding, val_64);
3340   }
3341   else {
3342     VMwrite64_Shadow(encoding, val_64);
3343   }
3344 }
3345 
3346 #endif // BX_SUPPORT_VMX >= 2
3347 
3348 #endif
3349 
3350 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD_EdGd(bxInstruction_c *i)
3351 {
3352 #if BX_SUPPORT_VMX
3353   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3354     exception(BX_UD_EXCEPTION, 0);
3355 
3356   bx_phy_address vmcs_pointer = BX_CPU_THIS_PTR vmcsptr;
3357 
3358   if (BX_CPU_THIS_PTR in_vmx_guest) {
3359 #if BX_SUPPORT_VMX >= 2
3360     if (Vmexit_Vmread(i))
3361 #endif
3362       VMexit_Instruction(i, VMX_VMEXIT_VMREAD, BX_READ);
3363 
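    // when VMCS shadowing allows VMREAD to execute in non-root mode, it operates on
    // the shadow VMCS referenced by the VMCS link pointer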
3364     vmcs_pointer = BX_CPU_THIS_PTR vmcs.vmcs_linkptr;
3365   }
3366 
3367   if (CPL != 0) {
3368     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3369     exception(BX_GP_EXCEPTION, 0);
3370   }
3371 
3372   if (vmcs_pointer == BX_INVALID_VMCSPTR) {
3373     BX_ERROR(("VMFAIL: VMREAD with invalid VMCS ptr !"));
3374     VMfailInvalid();
3375     BX_NEXT_INSTR(i);
3376   }
3377 
3378   unsigned encoding = BX_READ_32BIT_REG(i->src());
3379 
3380   if (! BX_CPU_THIS_PTR vmcs_map->is_valid(encoding)) {
3381     BX_ERROR(("VMREAD: not supported field 0x%08x", encoding));
3382     VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
3383     BX_NEXT_INSTR(i);
3384   }
3385 
3386   Bit32u field_32;
3387 #if BX_SUPPORT_VMX >= 2
3388   if (BX_CPU_THIS_PTR in_vmx_guest)
3389     field_32 = (Bit32u) vmread_shadow(encoding);
3390   else
3391 #endif
3392     field_32 = (Bit32u) vmread(encoding);
3393 
3394   if (i->modC0()) {
3395      BX_WRITE_32BIT_REGZ(i->dst(), field_32);
3396   }
3397   else {
3398      Bit32u eaddr = (Bit32u) BX_CPU_RESOLVE_ADDR(i);
3399      write_virtual_dword_32(i->seg(), eaddr, field_32);
3400   }
3401 
3402   VMsucceed();
3403 #endif
3404 
3405   BX_NEXT_INSTR(i);
3406 }
3407 
3408 #if BX_SUPPORT_X86_64
3409 
3410 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD_EqGq(bxInstruction_c *i)
3411 {
3412 #if BX_SUPPORT_VMX
3413   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3414     exception(BX_UD_EXCEPTION, 0);
3415 
3416   bx_phy_address vmcs_pointer = BX_CPU_THIS_PTR vmcsptr;
3417 
3418   if (BX_CPU_THIS_PTR in_vmx_guest) {
3419 #if BX_SUPPORT_VMX >= 2
3420     if (Vmexit_Vmread(i))
3421 #endif
3422       VMexit_Instruction(i, VMX_VMEXIT_VMREAD, BX_READ);
3423 
3424     vmcs_pointer = BX_CPU_THIS_PTR vmcs.vmcs_linkptr;
3425   }
3426 
3427   if (CPL != 0) {
3428     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3429     exception(BX_GP_EXCEPTION, 0);
3430   }
3431 
3432   if (vmcs_pointer == BX_INVALID_VMCSPTR) {
3433     BX_ERROR(("VMFAIL: VMREAD with invalid VMCS ptr !"));
3434     VMfailInvalid();
3435     BX_NEXT_INSTR(i);
3436   }
3437 
3438   if (BX_READ_64BIT_REG_HIGH(i->src())) {
3439     BX_ERROR(("VMREAD: not supported field (upper 32-bit not zero)"));
3440     VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
3441     BX_NEXT_INSTR(i);
3442   }
3443   unsigned encoding = BX_READ_32BIT_REG(i->src());
3444 
3445   if (! BX_CPU_THIS_PTR vmcs_map->is_valid(encoding)) {
3446     BX_ERROR(("VMREAD: not supported field 0x%08x", encoding));
3447     VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
3448     BX_NEXT_INSTR(i);
3449   }
3450 
3451   Bit64u field_64;
3452 #if BX_SUPPORT_VMX >= 2
3453   if (BX_CPU_THIS_PTR in_vmx_guest)
3454     field_64 = vmread_shadow(encoding);
3455   else
3456 #endif
3457     field_64 = vmread(encoding);
3458 
3459   if (i->modC0()) {
3460      BX_WRITE_64BIT_REG(i->dst(), field_64);
3461   }
3462   else {
3463      Bit64u eaddr = BX_CPU_RESOLVE_ADDR(i);
3464      write_linear_qword(i->seg(), get_laddr64(i->seg(), eaddr), field_64);
3465   }
3466 
3467   VMsucceed();
3468 #endif
3469 
3470   BX_NEXT_INSTR(i);
3471 }
3472 
3473 #endif
3474 
3475 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE_GdEd(bxInstruction_c *i)
3476 {
3477 #if BX_SUPPORT_VMX
3478   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3479     exception(BX_UD_EXCEPTION, 0);
3480 
3481   bx_phy_address vmcs_pointer = BX_CPU_THIS_PTR vmcsptr;
3482 
3483   if (BX_CPU_THIS_PTR in_vmx_guest) {
3484 #if BX_SUPPORT_VMX >= 2
3485     if (Vmexit_Vmwrite(i))
3486 #endif
3487       VMexit_Instruction(i, VMX_VMEXIT_VMWRITE, BX_WRITE);
3488 
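    // a VMWRITE allowed to execute in non-root mode targets the shadow VMCS
    // referenced by the VMCS link pointer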
3489     vmcs_pointer = BX_CPU_THIS_PTR vmcs.vmcs_linkptr;
3490   }
3491 
3492   if (CPL != 0) {
3493     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3494     exception(BX_GP_EXCEPTION, 0);
3495   }
3496 
3497   if (vmcs_pointer == BX_INVALID_VMCSPTR) {
3498     BX_ERROR(("VMFAIL: VMWRITE with invalid VMCS ptr !"));
3499     VMfailInvalid();
3500     BX_NEXT_INSTR(i);
3501   }
3502 
3503   Bit32u val_32;
3504 
3505   if (i->modC0()) {
3506      val_32 = BX_READ_32BIT_REG(i->src());
3507   }
3508   else {
3509      Bit32u eaddr = (Bit32u) BX_CPU_RESOLVE_ADDR(i);
3510      val_32 = read_virtual_dword_32(i->seg(), eaddr);
3511   }
3512 
3513   Bit32u encoding = BX_READ_32BIT_REG(i->dst());
3514 
3515   if (! BX_CPU_THIS_PTR vmcs_map->is_valid(encoding)) {
3516     BX_ERROR(("VMWRITE: not supported field 0x%08x", encoding));
3517     VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
3518     BX_NEXT_INSTR(i);
3519   }
3520 
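  // read-only (VM-exit information) fields are writable only when IA32_VMX_MISC
  // advertises VMWRITE-to-read-only-fields support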
3521   if (VMCS_FIELD_TYPE(encoding) == VMCS_FIELD_TYPE_READ_ONLY)
3522   {
3523     if ((VMX_MSR_MISC & VMX_MISC_SUPPORT_VMWRITE_READ_ONLY_FIELDS) == 0) {
3524       BX_ERROR(("VMWRITE: write to read only field 0x%08x", encoding));
3525       VMfail(VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
3526       BX_NEXT_INSTR(i);
3527     }
3528   }
3529 
3530 #if BX_SUPPORT_VMX >= 2
3531   if (BX_CPU_THIS_PTR in_vmx_guest)
3532     vmwrite_shadow(encoding, (Bit64u) val_32);
3533   else
3534 #endif
3535     vmwrite(encoding, (Bit64u) val_32);
3536 
3537   VMsucceed();
3538 #endif
3539 
3540   BX_NEXT_INSTR(i);
3541 }
3542 
3543 #if BX_SUPPORT_X86_64
3544 
3545 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE_GqEq(bxInstruction_c *i)
3546 {
3547 #if BX_SUPPORT_VMX
3548   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3549     exception(BX_UD_EXCEPTION, 0);
3550 
3551   bx_phy_address vmcs_pointer = BX_CPU_THIS_PTR vmcsptr;
3552 
3553   if (BX_CPU_THIS_PTR in_vmx_guest) {
3554 #if BX_SUPPORT_VMX >= 2
3555     if (Vmexit_Vmwrite(i))
3556 #endif
3557       VMexit_Instruction(i, VMX_VMEXIT_VMWRITE, BX_WRITE);
3558 
3559     vmcs_pointer = BX_CPU_THIS_PTR vmcs.vmcs_linkptr;
3560   }
3561 
3562   if (CPL != 0) {
3563     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3564     exception(BX_GP_EXCEPTION, 0);
3565   }
3566 
3567   if (vmcs_pointer == BX_INVALID_VMCSPTR) {
3568     BX_ERROR(("VMFAIL: VMWRITE with invalid VMCS ptr !"));
3569     VMfailInvalid();
3570     BX_NEXT_INSTR(i);
3571   }
3572 
3573   Bit64u val_64;
3574 
3575   if (i->modC0()) {
3576      val_64 = BX_READ_64BIT_REG(i->src());
3577   }
3578   else {
3579      Bit64u eaddr = BX_CPU_RESOLVE_ADDR(i);
3580      val_64 = read_linear_qword(i->seg(), get_laddr64(i->seg(), eaddr));
3581   }
3582 
3583   if (BX_READ_64BIT_REG_HIGH(i->dst())) {
3584      BX_ERROR(("VMWRITE: not supported field (upper 32-bit not zero)"));
3585      VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
3586      BX_NEXT_INSTR(i);
3587   }
3588 
3589   Bit32u encoding = BX_READ_32BIT_REG(i->dst());
3590 
3591   if (! BX_CPU_THIS_PTR vmcs_map->is_valid(encoding)) {
3592     BX_ERROR(("VMWRITE: not supported field 0x%08x", encoding));
3593     VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
3594     BX_NEXT_INSTR(i);
3595   }
3596 
3597   if (VMCS_FIELD_TYPE(encoding) == VMCS_FIELD_TYPE_READ_ONLY)
3598   {
3599     if ((VMX_MSR_MISC & VMX_MISC_SUPPORT_VMWRITE_READ_ONLY_FIELDS) == 0) {
3600       BX_ERROR(("VMWRITE: write to read only field 0x%08x", encoding));
3601       VMfail(VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
3602       BX_NEXT_INSTR(i);
3603     }
3604   }
3605 
3606 #if BX_SUPPORT_VMX >= 2
3607   if (BX_CPU_THIS_PTR in_vmx_guest)
3608     vmwrite_shadow(encoding, val_64);
3609   else
3610 #endif
3611     vmwrite(encoding, val_64);
3612 
3613   VMsucceed();
3614 #endif
3615 
3616   BX_NEXT_INSTR(i);
3617 }
3618 
3619 #endif
3620 
3621 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMCLEAR(bxInstruction_c *i)
3622 {
3623 #if BX_SUPPORT_VMX
3624   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3625     exception(BX_UD_EXCEPTION, 0);
3626 
3627   if (BX_CPU_THIS_PTR in_vmx_guest) {
3628     VMexit_Instruction(i, VMX_VMEXIT_VMCLEAR);
3629   }
3630 
3631   if (CPL != 0) {
3632     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3633     exception(BX_GP_EXCEPTION, 0);
3634   }
3635 
3636   bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
3637   Bit64u pAddr = read_virtual_qword(i->seg(), eaddr); // keep 64-bit
3638   if (! IsValidPageAlignedPhyAddr(pAddr)) {
3639     BX_ERROR(("VMFAIL: VMCLEAR with invalid physical address!"));
3640     VMfail(VMXERR_VMCLEAR_WITH_INVALID_ADDR);
3641     BX_NEXT_INSTR(i);
3642   }
3643 
3644   if (pAddr == BX_CPU_THIS_PTR vmxonptr) {
3645     BX_ERROR(("VMFAIL: VMCLEAR with VMXON ptr !"));
3646     VMfail(VMXERR_VMCLEAR_WITH_VMXON_VMCS_PTR);
3647   }
3648   else {
3649     // ensure that data for VMCS referenced by the operand is in memory
3650     // initialize implementation-specific data in VMCS region
3651 
3652     // clear VMCS launch state
3653     unsigned launch_field_offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(VMCS_LAUNCH_STATE_FIELD_ENCODING);
3654     if(launch_field_offset >= VMX_VMCS_AREA_SIZE)
3655       BX_PANIC(("VMCLEAR: can't access VMCS_LAUNCH_STATE encoding, offset=0x%x", launch_field_offset));
3656 
3657     Bit32u launch_state = VMCS_STATE_CLEAR;
3658     access_write_physical(pAddr + launch_field_offset, 4, &launch_state);
3659     BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr + launch_field_offset, 4,
3660             MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_WRITE, BX_VMCS_ACCESS, (Bit8u*)(&launch_state));
3661 
3662     if (pAddr == BX_CPU_THIS_PTR vmcsptr) {
3663         BX_CPU_THIS_PTR vmcsptr = BX_INVALID_VMCSPTR;
3664         BX_CPU_THIS_PTR vmcshostptr = 0;
3665     }
3666 
3667     VMsucceed();
3668   }
3669 #endif
3670 
3671   BX_NEXT_INSTR(i);
3672 }
3673 
3674 #if BX_CPU_LEVEL >= 6
3675 
3676 void BX_CPP_AttrRegparmN(1) BX_CPU_C::INVEPT(bxInstruction_c *i)
3677 {
3678 #if BX_SUPPORT_VMX >= 2
3679   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3680     exception(BX_UD_EXCEPTION, 0);
3681 
3682   if (BX_CPU_THIS_PTR in_vmx_guest) {
3683     VMexit_Instruction(i, VMX_VMEXIT_INVEPT, BX_WRITE);
3684   }
3685 
3686   if (CPL != 0) {
3687     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3688     exception(BX_GP_EXCEPTION, 0);
3689   }
3690 
3691   bx_address type;
3692   if (i->os64L()) {
3693     type = BX_READ_64BIT_REG(i->dst());
3694   }
3695   else {
3696     type = BX_READ_32BIT_REG(i->dst());
3697   }
3698 
3699   BxPackedXmmRegister inv_eptp;
3700   bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
3701   read_virtual_xmmword(i->seg(), eaddr, &inv_eptp);
3702 
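  // both invalidation granularities are emulated conservatively with a full TLB flush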
3703   switch(type) {
3704   case BX_INVEPT_INVVPID_SINGLE_CONTEXT_INVALIDATION:
3705      if (! is_eptptr_valid(inv_eptp.xmm64u(0))) {
3706        BX_ERROR(("INVEPT: invalid EPTPTR value !"));
3707        VMfail(VMXERR_INVALID_INVEPT_INVVPID);
3708        BX_NEXT_TRACE(i);
3709      }
3710      TLB_flush(); // Invalidate mappings associated with EPTP[51:12]
3711      break;
3712 
3713   case BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION:
3714      TLB_flush(); // Invalidate mappings associated with all EPTPs
3715      break;
3716 
3717   default:
3718      BX_ERROR(("INVEPT: not supported type !"));
3719      VMfail(VMXERR_INVALID_INVEPT_INVVPID);
3720      BX_NEXT_TRACE(i);
3721   }
3722 
3723   BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_INVEPT, type);
3724 
3725   VMsucceed();
3726 #else
3727   BX_INFO(("INVEPT: required VMXx2 support, use --enable-vmx=2 option"));
3728   exception(BX_UD_EXCEPTION, 0);
3729 #endif
3730 
3731   BX_NEXT_TRACE(i);
3732 }
3733 
3734 void BX_CPP_AttrRegparmN(1) BX_CPU_C::INVVPID(bxInstruction_c *i)
3735 {
3736 #if BX_SUPPORT_VMX >= 2
3737   if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
3738     exception(BX_UD_EXCEPTION, 0);
3739 
3740   if (BX_CPU_THIS_PTR in_vmx_guest) {
3741     VMexit_Instruction(i, VMX_VMEXIT_INVVPID, BX_WRITE);
3742   }
3743 
3744   if (CPL != 0) {
3745     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3746     exception(BX_GP_EXCEPTION, 0);
3747   }
3748 
3749   bx_address type;
3750   if (i->os64L()) {
3751     type = BX_READ_64BIT_REG(i->dst());
3752   }
3753   else {
3754     type = BX_READ_32BIT_REG(i->dst());
3755   }
3756 
3757   BxPackedXmmRegister invvpid_desc;
3758   bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
3759   read_virtual_xmmword(i->seg(), eaddr, &invvpid_desc);
3760 
3761   if (invvpid_desc.xmm64u(0) > 0xffff) {
3762     BX_ERROR(("INVVPID: INVVPID_DESC reserved bits set"));
3763     VMfail(VMXERR_INVALID_INVEPT_INVVPID);
3764     BX_NEXT_TRACE(i);
3765   }
3766 
3767   Bit16u vpid = invvpid_desc.xmm16u(0);
3768   if (vpid == 0 && type != BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION) {
3769     BX_ERROR(("INVVPID with VPID=0"));
3770     VMfail(VMXERR_INVALID_INVEPT_INVVPID);
3771     BX_NEXT_TRACE(i);
3772   }
3773 
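  // every INVVPID type is emulated conservatively with a TLB flush; only the
  // single-context-retaining-globals type spares global entries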
3774   switch(type) {
3775   case BX_INVEPT_INVVPID_INDIVIDUAL_ADDRESS_INVALIDATION:
3776     if (! IsCanonical(invvpid_desc.xmm64u(1))) {
3777       BX_ERROR(("INVVPID: non canonical LADDR single context invalidation"));
3778       VMfail(VMXERR_INVALID_INVEPT_INVVPID);
3779       BX_NEXT_TRACE(i);
3780     }
3781 
3782     TLB_flush(); // invalidate all mappings for address LADDR tagged with VPID
3783     break;
3784 
3785   case BX_INVEPT_INVVPID_SINGLE_CONTEXT_INVALIDATION:
3786     TLB_flush(); // invalidate all mappings tagged with VPID
3787     break;
3788 
3789   case BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION:
3790     TLB_flush(); // invalidate all mappings tagged with VPID <> 0
3791     break;
3792 
3793   case BX_INVEPT_INVVPID_SINGLE_CONTEXT_NON_GLOBAL_INVALIDATION:
3794     TLB_flushNonGlobal(); // invalidate all mappings tagged with VPID except globals
3795     break;
3796 
3797   default:
3798     BX_ERROR(("INVVPID: not supported type !"));
3799     VMfail(VMXERR_INVALID_INVEPT_INVVPID);
3800     BX_NEXT_TRACE(i);
3801   }
3802 
3803   BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_INVVPID, type);
3804 
3805   VMsucceed();
3806 #else
3807   BX_INFO(("INVVPID: required VMXx2 support, use --enable-vmx=2 option"));
3808   exception(BX_UD_EXCEPTION, 0);
3809 #endif
3810 
3811   BX_NEXT_TRACE(i);
3812 }
3813 
3814 void BX_CPP_AttrRegparmN(1) BX_CPU_C::INVPCID(bxInstruction_c *i)
3815 {
3816 #if BX_SUPPORT_VMX
3817   // INVPCID always causes #UD in a VMX guest when the "enable INVPCID" secondary control is clear; the #UD takes priority over any other exception the instruction may incur.
3818   if (BX_CPU_THIS_PTR in_vmx_guest) {
3819     if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_INVPCID)) {
3820        BX_ERROR(("INVPCID in VMX guest: not allowed to use instruction !"));
3821        exception(BX_UD_EXCEPTION, 0);
3822     }
3823   }
3824 #endif
3825 
3826   if (v8086_mode()) {
3827     BX_ERROR(("INVPCID: #GP - not recognized in v8086 mode"));
3828     exception(BX_GP_EXCEPTION, 0);
3829   }
3830 
3831 #if BX_SUPPORT_VMX >= 2
3832   // in a VMX guest INVPCID also causes a VMEXIT when the INVLPG-exiting control is set
3833   if (BX_CPU_THIS_PTR in_vmx_guest) {
3834     if (VMEXIT(VMX_VM_EXEC_CTRL2_INVLPG_VMEXIT)) {
3835       VMexit_Instruction(i, VMX_VMEXIT_INVPCID, BX_WRITE);
3836     }
3837   }
3838 #endif
3839 
3840   if (CPL != 0) {
3841     BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
3842     exception(BX_GP_EXCEPTION, 0);
3843   }
3844 
3845   bx_address type;
3846 #if BX_SUPPORT_X86_64
3847   if (i->os64L()) {
3848     type = BX_READ_64BIT_REG(i->dst());
3849   }
3850   else
3851 #endif
3852   {
3853     type = BX_READ_32BIT_REG(i->dst());
3854   }
3855 
3856   BxPackedXmmRegister invpcid_desc;
3857   bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
3858   read_virtual_xmmword(i->seg(), eaddr, &invpcid_desc);
3859 
3860   if (invpcid_desc.xmm64u(0) > 0xfff) {
3861     BX_ERROR(("INVPCID: INVPCID_DESC reserved bits set"));
3862     exception(BX_GP_EXCEPTION, 0);
3863   }
3864 
3865   Bit16u pcid = invpcid_desc.xmm16u(0) & 0xfff;
3866 
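  // as with INVVPID, the requested PCID granularity is emulated conservatively with
  // full or non-global TLB flushes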
3867   switch(type) {
3868   case BX_INVPCID_INDIVIDUAL_ADDRESS_NON_GLOBAL_INVALIDATION:
3869 #if BX_SUPPORT_X86_64
3870     if (! IsCanonical(invpcid_desc.xmm64u(1))) {
3871       BX_ERROR(("INVPCID: non canonical LADDR single context invalidation"));
3872       exception(BX_GP_EXCEPTION, 0);
3873     }
3874 #endif
3875     if (! BX_CPU_THIS_PTR cr4.get_PCIDE() && pcid != 0) {
3876       BX_ERROR(("INVPCID: invalid PCID"));
3877       exception(BX_GP_EXCEPTION, 0);
3878     }
3879     TLB_flushNonGlobal(); // Invalidate all mappings for LADDR tagged with PCID except globals
3880     break;
3881 
3882   case BX_INVPCID_SINGLE_CONTEXT_NON_GLOBAL_INVALIDATION:
3883     if (! BX_CPU_THIS_PTR cr4.get_PCIDE() && pcid != 0) {
3884       BX_ERROR(("INVPCID: invalid PCID"));
3885       exception(BX_GP_EXCEPTION, 0);
3886     }
3887     TLB_flushNonGlobal(); // Invalidate all mappings tagged with PCID except globals
3888     break;
3889 
3890   case BX_INVPCID_ALL_CONTEXT_INVALIDATION:
3891     TLB_flush(); // Invalidate all mappings tagged with any PCID
3892     break;
3893 
3894   case BX_INVPCID_ALL_CONTEXT_NON_GLOBAL_INVALIDATION:
3895     TLB_flushNonGlobal(); // Invalidate all mappings tagged with any PCID except globals
3896     break;
3897 
3898   default:
3899     BX_ERROR(("INVPCID: not supported type !"));
3900     exception(BX_GP_EXCEPTION, 0);
3901   }
3902 
3903   BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_INVPCID, type);
3904 
3905   BX_NEXT_TRACE(i);
3906 }
3907 
3908 #endif
3909 
3910 void BX_CPP_AttrRegparmN(1) BX_CPU_C::GETSEC(bxInstruction_c *i)
3911 {
3912 #if BX_CPU_LEVEL >= 6
3913   if (! BX_CPU_THIS_PTR cr4.get_SMXE())
3914     exception(BX_UD_EXCEPTION, 0);
3915 
3916 #if BX_SUPPORT_VMX
3917   if (BX_CPU_THIS_PTR in_vmx_guest) {
3918     VMexit(VMX_VMEXIT_GETSEC, 0);
3919   }
3920 #endif
3921 
3922   BX_PANIC(("GETSEC: SMX is not implemented yet !"));
3923 #endif
3924 
3925   BX_NEXT_TRACE(i);
3926 }
3927 
3928 #if BX_SUPPORT_VMX
3929 void BX_CPU_C::register_vmx_state(bx_param_c *parent)
3930 {
3931   if (! is_cpu_extension_supported(BX_ISA_VMX)) return;
3932 
3933   // register VMX state for save/restore param tree
3934   bx_list_c *vmx = new bx_list_c(parent, "VMX");
3935 
3936   BXRS_HEX_PARAM_FIELD(vmx, vmcsptr, BX_CPU_THIS_PTR vmcsptr);
3937   BXRS_HEX_PARAM_FIELD(vmx, vmxonptr, BX_CPU_THIS_PTR vmxonptr);
3938   BXRS_PARAM_BOOL(vmx, in_vmx, BX_CPU_THIS_PTR in_vmx);
3939   BXRS_PARAM_BOOL(vmx, in_vmx_guest, BX_CPU_THIS_PTR in_vmx_guest);
3940   BXRS_PARAM_BOOL(vmx, in_smm_vmx, BX_CPU_THIS_PTR in_smm_vmx);
3941   BXRS_PARAM_BOOL(vmx, in_smm_vmx_guest, BX_CPU_THIS_PTR in_smm_vmx_guest);
3942 
3943   bx_list_c *vmcache = new bx_list_c(vmx, "VMCS_CACHE");
3944 
3945   //
3946   // VM-Execution Control Fields
3947   //
3948 
3949   bx_list_c *vmexec_ctrls = new bx_list_c(vmcache, "VMEXEC_CTRLS");
3950 
3951   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmexec_ctrls1, BX_CPU_THIS_PTR vmcs.vmexec_ctrls1);
3952   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmexec_ctrls2, BX_CPU_THIS_PTR vmcs.vmexec_ctrls2);
3953   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmexec_ctrls3, BX_CPU_THIS_PTR vmcs.vmexec_ctrls3);
3954   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_exceptions_bitmap, BX_CPU_THIS_PTR vmcs.vm_exceptions_bitmap);
3955   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, tsc_multiplier, BX_CPU_THIS_PTR vmcs.tsc_multiplier);
3956   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_pf_mask, BX_CPU_THIS_PTR vmcs.vm_pf_mask);
3957   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_pf_match, BX_CPU_THIS_PTR vmcs.vm_pf_match);
3958   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, io_bitmap_addr1, BX_CPU_THIS_PTR vmcs.io_bitmap_addr[0]);
3959   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, io_bitmap_addr2, BX_CPU_THIS_PTR vmcs.io_bitmap_addr[1]);
3960   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, msr_bitmap_addr, BX_CPU_THIS_PTR vmcs.msr_bitmap_addr);
3961   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr0_mask, BX_CPU_THIS_PTR vmcs.vm_cr0_mask);
3962   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr0_read_shadow, BX_CPU_THIS_PTR vmcs.vm_cr0_read_shadow);
3963   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr4_mask, BX_CPU_THIS_PTR vmcs.vm_cr4_mask);
3964   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr4_read_shadow, BX_CPU_THIS_PTR vmcs.vm_cr4_read_shadow);
3965   BXRS_DEC_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_cnt, BX_CPU_THIS_PTR vmcs.vm_cr3_target_cnt);
3966   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_value1, BX_CPU_THIS_PTR vmcs.vm_cr3_target_value[0]);
3967   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_value2, BX_CPU_THIS_PTR vmcs.vm_cr3_target_value[1]);
3968   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_value3, BX_CPU_THIS_PTR vmcs.vm_cr3_target_value[2]);
3969   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_value4, BX_CPU_THIS_PTR vmcs.vm_cr3_target_value[3]);
3970   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmcs_linkptr, BX_CPU_THIS_PTR vmcs.vmcs_linkptr);
3971 #if BX_SUPPORT_X86_64
3972   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, virtual_apic_page_addr, BX_CPU_THIS_PTR vmcs.virtual_apic_page_addr);
3973   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_tpr_threshold, BX_CPU_THIS_PTR vmcs.vm_tpr_threshold);
3974   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, apic_access_page, BX_CPU_THIS_PTR vmcs.apic_access_page);
3975   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, apic_access, BX_CPU_THIS_PTR vmcs.apic_access);
3976 #endif
3977 #if BX_SUPPORT_VMX >= 2
3978   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eptptr, BX_CPU_THIS_PTR vmcs.eptptr);
3979   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vpid, BX_CPU_THIS_PTR vmcs.vpid);
3980   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, pml_address, BX_CPU_THIS_PTR vmcs.pml_address);
3981   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, pml_index, BX_CPU_THIS_PTR vmcs.pml_index);
3982   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, spptp, BX_CPU_THIS_PTR vmcs.spptp);
3983 #endif
3984 #if BX_SUPPORT_VMX >= 2
3985   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, pause_loop_exiting_gap, BX_CPU_THIS_PTR vmcs.ple.pause_loop_exiting_gap);
3986   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, pause_loop_exiting_window, BX_CPU_THIS_PTR vmcs.ple.pause_loop_exiting_window);
3987   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, first_pause_time, BX_CPU_THIS_PTR vmcs.ple.first_pause_time);
3988   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, last_pause_time, BX_CPU_THIS_PTR vmcs.ple.last_pause_time);
3989 #endif
3990 #if BX_SUPPORT_VMX >= 2
3991   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, svi, BX_CPU_THIS_PTR vmcs.svi);
3992   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, rvi, BX_CPU_THIS_PTR vmcs.rvi);
3993   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vppr, BX_CPU_THIS_PTR vmcs.vppr);
3994   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap0, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[0]);
3995   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap1, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[1]);
3996   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap2, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[2]);
3997   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap3, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[3]);
3998   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap4, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[4]);
3999   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap5, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[5]);
4000   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap6, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[6]);
4001   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap7, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[7]);
4002 #endif
4003 #if BX_SUPPORT_VMX >= 2
4004   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmread_bitmap_addr, BX_CPU_THIS_PTR vmcs.vmread_bitmap_addr);
4005   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmwrite_bitmap_addr, BX_CPU_THIS_PTR vmcs.vmwrite_bitmap_addr);
4006 #endif
4007 #if BX_SUPPORT_VMX >= 2
4008   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, ve_info_addr, BX_CPU_THIS_PTR vmcs.ve_info_addr);
4009   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eptp_index, BX_CPU_THIS_PTR vmcs.eptp_index);
4010   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, xss_exiting_bitmap, BX_CPU_THIS_PTR vmcs.xss_exiting_bitmap);
4011 #endif
4012 
4013   //
4014   // VM-Exit Control Fields
4015   //
4016 
4017   bx_list_c *vmexit_ctrls = new bx_list_c(vmcache, "VMEXIT_CTRLS");
4018 
4019   BXRS_HEX_PARAM_FIELD(vmexit_ctrls, vmexit_ctrls, BX_CPU_THIS_PTR vmcs.vmexit_ctrls);
4020   BXRS_DEC_PARAM_FIELD(vmexit_ctrls, vmexit_msr_store_cnt, BX_CPU_THIS_PTR vmcs.vmexit_msr_store_cnt);
4021   BXRS_HEX_PARAM_FIELD(vmexit_ctrls, vmexit_msr_store_addr, BX_CPU_THIS_PTR vmcs.vmexit_msr_store_addr);
4022   BXRS_DEC_PARAM_FIELD(vmexit_ctrls, vmexit_msr_load_cnt, BX_CPU_THIS_PTR vmcs.vmexit_msr_load_cnt);
4023   BXRS_HEX_PARAM_FIELD(vmexit_ctrls, vmexit_msr_load_addr, BX_CPU_THIS_PTR vmcs.vmexit_msr_load_addr);
4024 
4025   //
4026   // VM-Entry Control Fields
4027   //
4028 
4029   bx_list_c *vmentry_ctrls = new bx_list_c(vmcache, "VMENTRY_CTRLS");
4030 
4031   BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_ctrls, BX_CPU_THIS_PTR vmcs.vmentry_ctrls);
4032   BXRS_DEC_PARAM_FIELD(vmentry_ctrls, vmentry_msr_load_cnt, BX_CPU_THIS_PTR vmcs.vmentry_msr_load_cnt);
4033   BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_msr_load_addr, BX_CPU_THIS_PTR vmcs.vmentry_msr_load_addr);
4034   BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_interr_info, BX_CPU_THIS_PTR vmcs.vmentry_interr_info);
4035   BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_excep_err_code, BX_CPU_THIS_PTR vmcs.vmentry_excep_err_code);
4036   BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_instr_length, BX_CPU_THIS_PTR vmcs.vmentry_instr_length);
4037 
4038   //
4039   // VMCS Host State
4040   //
4041 
4042   bx_list_c *host = new bx_list_c(vmcache, "HOST_STATE");
4043 
4044 #undef NEED_CPU_REG_SHORTCUTS
4045 
4046   BXRS_HEX_PARAM_FIELD(host, CR0, BX_CPU_THIS_PTR vmcs.host_state.cr0);
4047   BXRS_HEX_PARAM_FIELD(host, CR3, BX_CPU_THIS_PTR vmcs.host_state.cr3);
4048   BXRS_HEX_PARAM_FIELD(host, CR4, BX_CPU_THIS_PTR vmcs.host_state.cr4);
4049   BXRS_HEX_PARAM_FIELD(host, ES, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_ES]);
4050   BXRS_HEX_PARAM_FIELD(host, CS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_CS]);
4051   BXRS_HEX_PARAM_FIELD(host, SS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_SS]);
4052   BXRS_HEX_PARAM_FIELD(host, DS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_DS]);
4053   BXRS_HEX_PARAM_FIELD(host, FS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_FS]);
4054   BXRS_HEX_PARAM_FIELD(host, FS_BASE, BX_CPU_THIS_PTR vmcs.host_state.fs_base);
4055   BXRS_HEX_PARAM_FIELD(host, GS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_GS]);
4056   BXRS_HEX_PARAM_FIELD(host, GS_BASE, BX_CPU_THIS_PTR vmcs.host_state.gs_base);
4057   BXRS_HEX_PARAM_FIELD(host, GDTR_BASE, BX_CPU_THIS_PTR vmcs.host_state.gdtr_base);
4058   BXRS_HEX_PARAM_FIELD(host, IDTR_BASE, BX_CPU_THIS_PTR vmcs.host_state.idtr_base);
4059   BXRS_HEX_PARAM_FIELD(host, TR, BX_CPU_THIS_PTR vmcs.host_state.tr_selector);
4060   BXRS_HEX_PARAM_FIELD(host, TR_BASE, BX_CPU_THIS_PTR vmcs.host_state.tr_base);
4061   BXRS_HEX_PARAM_FIELD(host, RSP, BX_CPU_THIS_PTR vmcs.host_state.rsp);
4062   BXRS_HEX_PARAM_FIELD(host, RIP, BX_CPU_THIS_PTR vmcs.host_state.rip);
4063   BXRS_HEX_PARAM_FIELD(host, sysenter_esp_msr, BX_CPU_THIS_PTR vmcs.host_state.sysenter_esp_msr);
4064   BXRS_HEX_PARAM_FIELD(host, sysenter_eip_msr, BX_CPU_THIS_PTR vmcs.host_state.sysenter_eip_msr);
4065   BXRS_HEX_PARAM_FIELD(host, sysenter_cs_msr, BX_CPU_THIS_PTR vmcs.host_state.sysenter_cs_msr);
4066 #if BX_SUPPORT_VMX >= 2
4067   BXRS_HEX_PARAM_FIELD(host, pat_msr, BX_CPU_THIS_PTR vmcs.host_state.pat_msr);
4068 #if BX_SUPPORT_X86_64
4069   BXRS_HEX_PARAM_FIELD(host, efer_msr, BX_CPU_THIS_PTR vmcs.host_state.efer_msr);
4070 #endif
4071 #endif
4072 #if BX_SUPPORT_CET
4073   BXRS_HEX_PARAM_FIELD(host, ia32_s_cet_msr, BX_CPU_THIS_PTR vmcs.host_state.msr_ia32_s_cet);
4074   BXRS_HEX_PARAM_FIELD(host, SSP, BX_CPU_THIS_PTR vmcs.host_state.ssp);
4075   BXRS_HEX_PARAM_FIELD(host, interrupt_ssp_table_address, BX_CPU_THIS_PTR vmcs.host_state.interrupt_ssp_table_address);
4076 #endif
4077 #if BX_SUPPORT_PKEYS
4078   BXRS_HEX_PARAM_FIELD(host, pkrs, BX_CPU_THIS_PTR vmcs.host_state.pkrs);
4079 #endif
4080 }
4081 
4082 #endif // BX_SUPPORT_VMX
4083