1 /////////////////////////////////////////////////////////////////////////
2 // $Id: vmexit.cc 14300 2021-07-03 14:31:14Z sshwarts $
3 /////////////////////////////////////////////////////////////////////////
4 //
5 // Copyright (c) 2009-2015 Stanislav Shwartsman
6 // Written by Stanislav Shwartsman [sshwarts at sourceforge net]
7 //
8 // This library is free software; you can redistribute it and/or
9 // modify it under the terms of the GNU Lesser General Public
10 // License as published by the Free Software Foundation; either
11 // version 2 of the License, or (at your option) any later version.
12 //
13 // This library is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 // Lesser General Public License for more details.
17 //
18 // You should have received a copy of the GNU Lesser General Public
19 // License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
21 //
22 /////////////////////////////////////////////////////////////////////////
23
24 #define NEED_CPU_REG_SHORTCUTS 1
25 #include "bochs.h"
26 #include "cpu.h"
27 #define LOG_THIS BX_CPU_THIS_PTR
28
29 #include "pc_system.h"
30
31 #if BX_SUPPORT_VMX
32
33 #include "decoder/ia_opcodes.h"
34
35 // BX_READ(0) form means nnn(), rm(); BX_WRITE(1) form means rm(), nnn()
gen_instruction_info(bxInstruction_c * i,Bit32u reason,bool rw_form)36 Bit32u gen_instruction_info(bxInstruction_c *i, Bit32u reason, bool rw_form)
37 {
38 Bit32u instr_info = 0;
39
40 switch(reason) {
41 case VMX_VMEXIT_VMREAD:
42 case VMX_VMEXIT_VMWRITE:
43 #if BX_SUPPORT_VMX >= 2
44 case VMX_VMEXIT_GDTR_IDTR_ACCESS:
45 case VMX_VMEXIT_LDTR_TR_ACCESS:
46 case VMX_VMEXIT_INVEPT:
47 case VMX_VMEXIT_INVVPID:
48 case VMX_VMEXIT_INVPCID:
49 #endif
50 if (rw_form == BX_WRITE)
51 instr_info |= i->dst() << 28;
52 else
53 instr_info |= i->src() << 28;
54 break;
55
56 case VMX_VMEXIT_RDRAND:
57 case VMX_VMEXIT_RDSEED:
58 // bits 12:11 hold operand size
59 if (i->os64L())
60 instr_info |= 1 << 12;
61 else if (i->as32L())
62 instr_info |= 1 << 11;
63 break;
64
65 default:
66 break;
67 }
68
69 // --------------------------------------
70 // instruction information field format
71 // --------------------------------------
72 //
73 // [01:00] | Memory operand scale field (encoded)
74 // [02:02] | Undefined
75 // [06:03] | Reg1, undefined when memory operand
76 // [09:07] | Memory operand address size
77 // [10:10] | Memory/Register format (0 - mem, 1 - reg)
78 // [14:11] | Reserved
79 // [17:15] | Memory operand segment register field
80 // [21:18] | Memory operand index field
81 // [22:22] | Memory operand index field invalid
82 // [26:23] | Memory operand base field
83 // [27:27] | Memory operand base field invalid
84 // [31:28] | Reg2, if exists
85 //
86 if (i->modC0()) {
87 // reg/reg format
88 instr_info |= (1 << 10);
89 if (rw_form == BX_WRITE)
90 instr_info |= i->src() << 3;
91 else
92 instr_info |= i->dst() << 3;
93 }
94 else {
95 // memory format
96 if (i->as64L())
97 instr_info |= 1 << 8;
98 else if (i->as32L())
99 instr_info |= 1 << 7;
100
101 instr_info |= i->seg() << 15;
102
103 // index field is always initialized because of gather but not always valid
104 if (i->sibIndex() != BX_NIL_REGISTER && i->sibIndex() != 4)
105 instr_info |= i->sibScale() | (i->sibIndex() << 18);
106 else
107 instr_info |= 1 << 22; // index invalid
108
109 if (i->sibBase() != BX_NIL_REGISTER)
110 instr_info |= i->sibBase() << 23;
111 else
112 instr_info |= 1 << 27; // base invalid
113 }
114
115 return instr_info;
116 }
117
// Take a VMEXIT caused by executing an instruction that reports
// instruction information and/or a displacement-based exit qualification.
// 'rw_form' is forwarded to gen_instruction_info() to select which operand
// is the instruction's register operand.
void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_Instruction(bxInstruction_c *i, Bit32u reason, bool rw_form)
{
  Bit64u qualification = 0;
  Bit32u instr_info = 0;

  switch(reason) {
    case VMX_VMEXIT_VMREAD:
    case VMX_VMEXIT_VMWRITE:
    case VMX_VMEXIT_VMPTRLD:
    case VMX_VMEXIT_VMPTRST:
    case VMX_VMEXIT_VMCLEAR:
    case VMX_VMEXIT_VMXON:
#if BX_SUPPORT_VMX >= 2
    case VMX_VMEXIT_GDTR_IDTR_ACCESS:
    case VMX_VMEXIT_LDTR_TR_ACCESS:
    case VMX_VMEXIT_INVEPT:
    case VMX_VMEXIT_INVVPID:
    case VMX_VMEXIT_INVPCID:
    case VMX_VMEXIT_XSAVES:
    case VMX_VMEXIT_XRSTORS:
#endif
      // for these instructions the exit qualification holds the
      // displacement of the memory operand
#if BX_SUPPORT_X86_64
      if (long64_mode()) {
        qualification = (Bit64u) i->displ32s(); // sign-extended displacement
        // RIP-relative addressing: displacement is relative to RIP
        if (i->sibBase() == BX_64BIT_REG_RIP)
          qualification += RIP;
      }
      else
#endif
      {
        qualification = (Bit64u) ((Bit32u) i->displ32s());
        qualification &= i->asize_mask(); // truncate to effective address size
      }
      // fall through

    case VMX_VMEXIT_RDRAND:
    case VMX_VMEXIT_RDSEED:
      instr_info = gen_instruction_info(i, reason, rw_form);
      VMwrite32(VMCS_32BIT_VMEXIT_INSTRUCTION_INFO, instr_info);
      break;

    default:
      BX_PANIC(("VMexit_Instruction reason %d", reason));
  }

  VMexit(reason, qualification);
}
165
// Handle the two VMX controls affecting the PAUSE instruction:
// unconditional PAUSE exiting and (VMX >= 2) PAUSE-loop exiting (PLE).
void BX_CPU_C::VMexit_PAUSE(void)
{
  BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

  // "PAUSE exiting" control: every PAUSE causes a VMEXIT
  if (VMEXIT(VMX_VM_EXEC_CTRL2_PAUSE_VMEXIT)) {
    VMexit(VMX_VMEXIT_PAUSE, 0);
  }

#if BX_SUPPORT_VMX >= 2
  // PAUSE-loop exiting (checked only at CPL 0): VMEXIT once PAUSEs keep
  // arriving no further than 'gap' ticks apart for longer than 'window'
  // ticks in total
  if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_PAUSE_LOOP_VMEXIT) && CPL == 0) {
    VMX_PLE *ple = &BX_CPU_THIS_PTR vmcs.ple;
    Bit64u currtime = bx_pc_system.time_ticks();
    if ((currtime - ple->last_pause_time) > ple->pause_loop_exiting_gap) {
      // the loop was broken - restart measuring from this PAUSE
      ple->first_pause_time = currtime;
    }
    else {
      if ((currtime - ple->first_pause_time) > ple->pause_loop_exiting_window)
        VMexit(VMX_VMEXIT_PAUSE, 0);
    }
    ple->last_pause_time = currtime;
  }
#endif
}
189
VMexit_ExtInterrupt(void)190 void BX_CPU_C::VMexit_ExtInterrupt(void)
191 {
192 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
193
194 if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT)) {
195 VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
196 if (! (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_INTA_ON_VMEXIT)) {
197 // interrupt wasn't acknowledged and still pending, interruption info is invalid
198 VMwrite32(VMCS_32BIT_VMEXIT_INTERRUPTION_INFO, 0);
199 VMexit(VMX_VMEXIT_EXTERNAL_INTERRUPT, 0);
200 }
201 }
202 }
203
// Decide whether delivery of an event (external interrupt, NMI, exception
// or software interrupt) in VMX non-root operation causes a VMEXIT.
// If not, the event is recorded in the IDT-vectoring info fields and
// delivery continues; otherwise the interruption info fields are written
// and the VMEXIT is taken with the given exit qualification.
void BX_CPU_C::VMexit_Event(unsigned type, unsigned vector, Bit16u errcode, bool errcode_valid, Bit64u qualification)
{
  if (! BX_CPU_THIS_PTR in_vmx_guest) return;

  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
  bool vmexit = false;
  VMX_vmexit_reason reason = VMX_VMEXIT_EXCEPTION_NMI;

  switch(type) {
    case BX_EXTERNAL_INTERRUPT:
      reason = VMX_VMEXIT_EXTERNAL_INTERRUPT;
      if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT))
        vmexit = true;
      break;

    case BX_NMI:
      if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_NMI_EXITING))
        vmexit = true;
      break;

    case BX_PRIVILEGED_SOFTWARE_INTERRUPT:
    case BX_SOFTWARE_EXCEPTION:
    case BX_HARDWARE_EXCEPTION:
      BX_ASSERT(vector < BX_CPU_HANDLED_EXCEPTIONS);
      if (vector == BX_PF_EXCEPTION) {
        // page faults are specially treated: the exception bitmap bit for
        // #PF is combined with the PF error-code mask/match fields - VMEXIT
        // occurs when the error-code match result equals the bitmap bit
        bool err_match = ((errcode & vm->vm_pf_mask) == vm->vm_pf_match);
        bool bitmap = (vm->vm_exceptions_bitmap >> BX_PF_EXCEPTION) & 1;
        vmexit = (err_match == bitmap);
      }
      else {
        // other exceptions: VMEXIT iff the exception bitmap bit is set
        vmexit = (vm->vm_exceptions_bitmap >> vector) & 1;
      }
      break;

    case BX_SOFTWARE_INTERRUPT:
      break; // no VMEXIT on software interrupt

    default:
      BX_ERROR(("VMexit_Event: unknown event type %d", type));
  }

  // ----------------------------------------------------
  //               VMExit interruption info
  // ----------------------------------------------------
  // [07:00] | Interrupt/Exception vector
  // [10:08] | Interrupt/Exception type
  // [11:11] | error code pushed to the stack
  // [12:12] | NMI unblocking due to IRET
  // [30:13] | reserved
  // [31:31] | interruption info valid
  //

  if (! vmexit) {
    // record IDT vectoring information for a possible later VMEXIT that
    // happens during delivery of this event
    vm->idt_vector_error_code = errcode;
    vm->idt_vector_info = vector | (type << 8);
    if (errcode_valid)
      vm->idt_vector_info |= (1 << 11); // error code delivered

    BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
    return;
  }

  BX_DEBUG(("VMEXIT: event vector 0x%02x type %d error code=0x%04x", vector, type, errcode));

  // VMEXIT is not considered to occur during event delivery if it results
  // in a double fault exception that causes VMEXIT directly
  if (vector == BX_DF_EXCEPTION)
    BX_CPU_THIS_PTR in_event = 0; // clear in_event indication on #DF

  if (vector == BX_DB_EXCEPTION) {
    // qualifcation for debug exceptions similar to debug_trap field
    qualification = BX_CPU_THIS_PTR debug_trap & 0x0000600f;
  }

  // clear debug_trap field
  BX_CPU_THIS_PTR debug_trap = 0;
  BX_CPU_THIS_PTR inhibit_mask = 0;

  Bit32u interruption_info = vector | (type << 8);
  if (errcode_valid)
    interruption_info |= (1 << 11); // error code delivered
  interruption_info |= (1 << 31); // valid

  if (BX_CPU_THIS_PTR nmi_unblocking_iret)
    interruption_info |= (1 << 12);

  VMwrite32(VMCS_32BIT_VMEXIT_INTERRUPTION_INFO, interruption_info);
  VMwrite32(VMCS_32BIT_VMEXIT_INTERRUPTION_ERR_CODE, errcode);

  VMexit(reason, qualification);
}
297
VMexit_TripleFault(void)298 void BX_CPU_C::VMexit_TripleFault(void)
299 {
300 if (! BX_CPU_THIS_PTR in_vmx_guest) return;
301
302 // VMEXIT is not considered to occur during event delivery if it results
303 // in a triple fault exception (that causes VMEXIT directly)
304 BX_CPU_THIS_PTR in_event = 0;
305
306 VMexit(VMX_VMEXIT_TRIPLE_FAULT, 0);
307 }
308
VMexit_TaskSwitch(Bit16u tss_selector,unsigned source)309 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_TaskSwitch(Bit16u tss_selector, unsigned source)
310 {
311 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
312
313 VMexit(VMX_VMEXIT_TASK_SWITCH, tss_selector | (source << 30));
314 }
315
// MSR ranges covered by the VMX MSR bitmaps: "low" MSRs 00000000H-00001FFFH
// and "high" MSRs C0000000H-C0001FFFH; accesses outside both ranges always
// cause a VMEXIT
const Bit32u BX_VMX_LO_MSR_START = 0x00000000;
const Bit32u BX_VMX_LO_MSR_END   = 0x00001FFF;
const Bit32u BX_VMX_HI_MSR_START = 0xC0000000;
const Bit32u BX_VMX_HI_MSR_END   = 0xC0001FFF;
320
// Decide whether a RDMSR/WRMSR of 'msr' must cause a VMEXIT ('op' is the
// VMEXIT reason, VMX_VMEXIT_RDMSR or VMX_VMEXIT_WRMSR) and take it if so.
// Without MSR bitmaps every MSR access exits; with bitmaps the per-MSR bit
// is consulted.
void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_MSR(unsigned op, Bit32u msr)
{
  BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

  bool vmexit = false;
  if (! VMEXIT(VMX_VM_EXEC_CTRL2_MSR_BITMAPS)) vmexit = true;
  else {
    VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
    Bit8u field;

    // bitmap page layout (one bit per MSR): read-low at offset 0,
    // read-high at 1024, write-low at 2048, write-high at 3072
    if (msr >= BX_VMX_HI_MSR_START) {
      if (msr > BX_VMX_HI_MSR_END) vmexit = true; // outside both bitmap ranges
      else {
        // check MSR-HI bitmaps
        bx_phy_address pAddr = vm->msr_bitmap_addr + ((msr - BX_VMX_HI_MSR_START) >> 3) + 1024 + ((op == VMX_VMEXIT_RDMSR) ? 0 : 2048);
        access_read_physical(pAddr, 1, &field);
        BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 1, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_MSR_BITMAP_ACCESS, &field);
        if (field & (1 << (msr & 7)))
          vmexit = true;
      }
    }
    else {
      if (msr > BX_VMX_LO_MSR_END) vmexit = true; // outside both bitmap ranges
      else {
        // check MSR-LO bitmaps
        bx_phy_address pAddr = vm->msr_bitmap_addr + (msr >> 3) + ((op == VMX_VMEXIT_RDMSR) ? 0 : 2048);
        access_read_physical(pAddr, 1, &field);
        BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 1, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_MSR_BITMAP_ACCESS, &field);
        if (field & (1 << (msr & 7)))
          vmexit = true;
      }
    }
  }

  if (vmexit) {
    BX_DEBUG(("VMEXIT: %sMSR 0x%08x", (op == VMX_VMEXIT_RDMSR) ? "RD" : "WR", msr));
    VMexit(op, 0); // exit qualification is 0 for MSR accesses
  }
}
360
// exit qualification flags for I/O instruction VMEXITs
#define VMX_VMEXIT_IO_PORTIN       (1 << 3) // direction of access (1 = IN)
#define VMX_VMEXIT_IO_INSTR_STRING (1 << 4) // string instruction (INS/OUTS)
#define VMX_VMEXIT_IO_INSTR_REP    (1 << 5) // REP prefixed
#define VMX_VMEXIT_IO_INSTR_IMM    (1 << 6) // port specified by immediate operand
365
// Decide whether an I/O instruction accessing 'len' bytes at 'port' must
// cause a VMEXIT - either via the I/O bitmaps or via the unconditional
// I/O exiting control - and, if so, build the exit qualification (and the
// guest-linear-address / instruction-info fields for string I/O) and exit.
void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_IO(bxInstruction_c *i, unsigned port, unsigned len)
{
  BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
  BX_ASSERT(port <= 0xFFFF);

  bool vmexit = false;

  if (VMEXIT(VMX_VM_EXEC_CTRL2_IO_BITMAPS)) {
    // always VMEXIT on port "wrap around" case
    if ((port + len) > 0x10000) vmexit = true;
    else {
      // bitmap A covers ports 0x0000..0x7FFF, bitmap B 0x8000..0xFFFF
      Bit8u bitmap[2];
      bx_phy_address pAddr;

      if ((port & 0x7fff) + len > 0x8000) {
        // special case - the IO access split cross both I/O bitmaps:
        // read the last byte of bitmap A and the first byte of bitmap B
        pAddr = BX_CPU_THIS_PTR vmcs.io_bitmap_addr[0] + 0xfff;
        access_read_physical(pAddr, 1, &bitmap[0]);
        BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 1, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_IO_BITMAP_ACCESS, &bitmap[0]);

        pAddr = BX_CPU_THIS_PTR vmcs.io_bitmap_addr[1];
        access_read_physical(pAddr, 1, &bitmap[1]);
        BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 1, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_IO_BITMAP_ACCESS, &bitmap[1]);
      }
      else {
        // access_read_physical cannot read 2 bytes cross 4K boundary :(
        pAddr = BX_CPU_THIS_PTR vmcs.io_bitmap_addr[(port >> 15) & 1] + ((port & 0x7fff) / 8);
        access_read_physical(pAddr, 1, &bitmap[0]);
        BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 1, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_IO_BITMAP_ACCESS, &bitmap[0]);

        pAddr++;
        access_read_physical(pAddr, 1, &bitmap[1]);
        BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 1, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_IO_BITMAP_ACCESS, &bitmap[1]);
      }

      // VMEXIT if any of the accessed ports has its bitmap bit set
      Bit16u combined_bitmap = bitmap[1];
      combined_bitmap = (combined_bitmap << 8) | bitmap[0];

      unsigned mask = ((1 << len) - 1) << (port & 7);
      if (combined_bitmap & mask) vmexit = true;
    }
  }
  else if (VMEXIT(VMX_VM_EXEC_CTRL2_IO_VMEXIT)) vmexit = true;

  if (vmexit) {
    BX_DEBUG(("VMEXIT: I/O port 0x%04x", port));

    Bit32u qualification = 0;

    // classify the opcode: direction (IN/OUT), immediate vs DX port,
    // string form and REP prefix
    switch(i->getIaOpcode()) {
      case BX_IA_IN_ALIb:
      case BX_IA_IN_AXIb:
      case BX_IA_IN_EAXIb:
        qualification = VMX_VMEXIT_IO_PORTIN | VMX_VMEXIT_IO_INSTR_IMM;
        break;

      case BX_IA_OUT_IbAL:
      case BX_IA_OUT_IbAX:
      case BX_IA_OUT_IbEAX:
        qualification = VMX_VMEXIT_IO_INSTR_IMM;
        break;

      case BX_IA_IN_ALDX:
      case BX_IA_IN_AXDX:
      case BX_IA_IN_EAXDX:
        qualification = VMX_VMEXIT_IO_PORTIN; // no immediate
        break;

      case BX_IA_OUT_DXAL:
      case BX_IA_OUT_DXAX:
      case BX_IA_OUT_DXEAX:
        qualification = 0; // PORTOUT, no immediate
        break;

      case BX_IA_REP_INSB_YbDX:
      case BX_IA_REP_INSW_YwDX:
      case BX_IA_REP_INSD_YdDX:
        qualification = VMX_VMEXIT_IO_PORTIN | VMX_VMEXIT_IO_INSTR_STRING;
        if (i->repUsedL())
          qualification |= VMX_VMEXIT_IO_INSTR_REP;
        break;

      case BX_IA_REP_OUTSB_DXXb:
      case BX_IA_REP_OUTSW_DXXw:
      case BX_IA_REP_OUTSD_DXXd:
        qualification = VMX_VMEXIT_IO_INSTR_STRING; // PORTOUT
        if (i->repUsedL())
          qualification |= VMX_VMEXIT_IO_INSTR_REP;
        break;

      default:
        BX_PANIC(("VMexit_IO: I/O instruction %s unknown", i->getIaOpcodeNameShort()));
    }

    if (qualification & VMX_VMEXIT_IO_INSTR_STRING) {
      // string I/O additionally reports the guest linear address of the
      // string element (ES:DI for INS, seg:SI for OUTS) and an
      // instruction-info field carrying segment and address size
      bx_address asize_mask = (bx_address) i->asize_mask(), laddr;

      if (qualification & VMX_VMEXIT_IO_PORTIN)
        laddr = get_laddr(BX_SEG_REG_ES, RDI & asize_mask);
      else // PORTOUT
        laddr = get_laddr(i->seg(), RSI & asize_mask);

      VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, laddr);

      Bit32u instruction_info = i->seg() << 15;
      if (i->as64L())
        instruction_info |= (1 << 8);
      else if (i->as32L())
        instruction_info |= (1 << 7);

      VMwrite32(VMCS_32BIT_VMEXIT_INSTRUCTION_INFO, instruction_info);
    }

    // qualification: [2:0] access size - 1, [15:3] flags, [31:16] port
    VMexit(VMX_VMEXIT_IO_INSTRUCTION, qualification | (len-1) | (port << 16));
  }
}
482
483 //
484 // ----------------------------------------------------------------
485 // Exit qualification for CR access
486 // ----------------------------------------------------------------
487 // [03:00] | Number of CR register (CR0, CR3, CR4, CR8)
488 // [05:04] | CR access type (0 - MOV to CR, 1 - MOV from CR, 2 - CLTS, 3 - LMSW)
489 // [06:06] | LMSW operand reg/mem (cleared for CR access and CLTS)
490 // [07:07] | reserved
491 // [11:08] | Source Operand Register for CR access (cleared for CLTS and LMSW)
492 // [15:12] | reserved
493 // [31:16] | LMSW source data (cleared for CR access and CLTS)
494 // [63:32] | reserved
495 //
496
VMexit_CLTS(void)497 bool BX_CPU_C::VMexit_CLTS(void)
498 {
499 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
500
501 VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
502
503 if (vm->vm_cr0_mask & vm->vm_cr0_read_shadow & 0x8)
504 {
505 // all rest of the fields cleared to zero
506 Bit64u qualification = VMX_VMEXIT_CR_ACCESS_CLTS << 4;
507
508 VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
509 }
510
511 if ((vm->vm_cr0_mask & 0x8) != 0 && (vm->vm_cr0_read_shadow & 0x8) == 0)
512 return 1; /* do not clear CR0.TS */
513 else
514 return 0;
515 }
516
// Handle LMSW loading 'msw' in VMX non-root operation: take a CR-access
// VMEXIT when a host-owned low CR0 bit would change, otherwise return the
// value to load (host-owned bits preserved from the current CR0).
Bit32u BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_LMSW(bxInstruction_c *i, Bit32u msw)
{
  BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
  Bit32u mask = vm->vm_cr0_mask & 0xF; /* LMSW affects only low 4 bits */
  bool vmexit = false;

  // PE (bit 0): VMEXIT when setting a host-owned PE which the read shadow
  // reports as clear (LMSW can never clear PE)
  if ((mask & msw & 0x1) != 0 && (vm->vm_cr0_read_shadow & 0x1) == 0)
    vmexit = true;

  // MP/EM/TS (bits 3:1): VMEXIT when any host-owned bit would change
  // relative to the read shadow
  if ((mask & vm->vm_cr0_read_shadow & 0xE) != (mask & msw & 0xE))
    vmexit = true;

  if (vmexit) {
    BX_DEBUG(("VMEXIT: CR0 write by LMSW of value 0x%04x", msw));

    Bit64u qualification = VMX_VMEXIT_CR_ACCESS_LMSW << 4;
    qualification |= msw << 16; // LMSW source data in bits [31:16]
    if (! i->modC0()) {
      qualification |= (1 << 6); // memory operand
      VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, get_laddr(i->seg(), RMAddr(i)));
    }

    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
  }

  // keep untouched all the bits set in CR0 mask
  return (BX_CPU_THIS_PTR cr0.get32() & mask) | (msw & ~mask);
}
547
VMexit_CR0_Write(bxInstruction_c * i,bx_address val)548 bx_address BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_CR0_Write(bxInstruction_c *i, bx_address val)
549 {
550 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
551
552 VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
553
554 if ((vm->vm_cr0_mask & vm->vm_cr0_read_shadow) != (vm->vm_cr0_mask & val))
555 {
556 BX_DEBUG(("VMEXIT: CR0 write"));
557 Bit64u qualification = i->src() << 8;
558 VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
559 }
560
561 // keep untouched all the bits set in CR0 mask
562 return (BX_CPU_THIS_PTR cr0.get32() & vm->vm_cr0_mask) | (val & ~vm->vm_cr0_mask);
563 }
564
VMexit_CR3_Read(bxInstruction_c * i)565 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_CR3_Read(bxInstruction_c *i)
566 {
567 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
568
569 if (VMEXIT(VMX_VM_EXEC_CTRL2_CR3_READ_VMEXIT)) {
570 BX_DEBUG(("VMEXIT: CR3 read"));
571 Bit64u qualification = 3 | (VMX_VMEXIT_CR_ACCESS_CR_READ << 4) | (i->dst() << 8);
572 VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
573 }
574 }
575
VMexit_CR3_Write(bxInstruction_c * i,bx_address val)576 void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_CR3_Write(bxInstruction_c *i, bx_address val)
577 {
578 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
579
580 VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
581
582 if (VMEXIT(VMX_VM_EXEC_CTRL2_CR3_WRITE_VMEXIT)) {
583 for (unsigned n=0; n < vm->vm_cr3_target_cnt; n++) {
584 if (vm->vm_cr3_target_value[n] == val) return;
585 }
586
587 BX_DEBUG(("VMEXIT: CR3 write"));
588 Bit64u qualification = 3 | (i->src() << 8);
589 VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
590 }
591 }
592
VMexit_CR4_Write(bxInstruction_c * i,bx_address val)593 bx_address BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_CR4_Write(bxInstruction_c *i, bx_address val)
594 {
595 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
596
597 VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
598
599 if ((vm->vm_cr4_mask & vm->vm_cr4_read_shadow) != (vm->vm_cr4_mask & val))
600 {
601 BX_DEBUG(("VMEXIT: CR4 write"));
602 Bit64u qualification = 4 | (i->src() << 8);
603 VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
604 }
605
606 // keep untouched all the bits set in CR4 mask
607 return (BX_CPU_THIS_PTR cr4.get32() & vm->vm_cr4_mask) | (val & ~vm->vm_cr4_mask);
608 }
609
VMexit_CR8_Read(bxInstruction_c * i)610 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_CR8_Read(bxInstruction_c *i)
611 {
612 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
613
614 if (VMEXIT(VMX_VM_EXEC_CTRL2_CR8_READ_VMEXIT)) {
615 BX_DEBUG(("VMEXIT: CR8 read"));
616 Bit64u qualification = 8 | (VMX_VMEXIT_CR_ACCESS_CR_READ << 4) | (i->dst() << 8);
617 VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
618 }
619 }
620
VMexit_CR8_Write(bxInstruction_c * i)621 void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_CR8_Write(bxInstruction_c *i)
622 {
623 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
624
625 if (VMEXIT(VMX_VM_EXEC_CTRL2_CR8_WRITE_VMEXIT)) {
626 BX_DEBUG(("VMEXIT: CR8 write"));
627 Bit64u qualification = 8 | (i->src() << 8);
628 VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
629 }
630 }
631
632 //
633 // ----------------------------------------------------------------
634 // Exit qualification for DR access
635 // ----------------------------------------------------------------
636 // [03:00] | Number of DR register
637 // [04:04] | DR access type (0 - MOV to DR, 1 - MOV from DR)
638 // [07:05] | reserved
639 // [11:08] | Source Operand Register
640 // [63:12] | reserved
641 //
642
VMexit_DR_Access(unsigned read,unsigned dr,unsigned reg)643 void BX_CPU_C::VMexit_DR_Access(unsigned read, unsigned dr, unsigned reg)
644 {
645 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
646
647 if (VMEXIT(VMX_VM_EXEC_CTRL2_DRx_ACCESS_VMEXIT))
648 {
649 BX_DEBUG(("VMEXIT: DR%d %s access", dr, read ? "READ" : "WRITE"));
650
651 Bit64u qualification = dr | (reg << 8);
652 if (read)
653 qualification |= (1 << 4);
654
655 VMexit(VMX_VMEXIT_DR_ACCESS, qualification);
656 }
657 }
658
659 #if BX_SUPPORT_VMX >= 2
VMX_Get_Current_VPID(void)660 Bit16u BX_CPU_C::VMX_Get_Current_VPID(void)
661 {
662 if (! BX_CPU_THIS_PTR in_vmx_guest || !SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VPID_ENABLE))
663 return 0;
664
665 return BX_CPU_THIS_PTR vmcs.vpid;
666 }
667 #endif
668
669 #if BX_SUPPORT_VMX >= 2
// Returns true when VMREAD executed in VMX non-root operation must cause
// a VMEXIT; false when VMCS shadowing may satisfy it from the shadow VMCS.
bool BX_CPP_AttrRegparmN(1) BX_CPU_C::Vmexit_Vmread(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

  // without VMCS shadowing every VMREAD exits
  if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VMCS_SHADOWING)) return true;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // field encoding with upper 32 bits set always exits
    if (BX_READ_64BIT_REG_HIGH(i->src())) return true;
  }
#endif
  unsigned encoding = BX_READ_32BIT_REG(i->src());
  if (encoding > 0x7fff) return true; // only encodings 0..0x7FFF are covered by the bitmap

  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;

  // consult the VMREAD bitmap - one bit per field encoding; a set bit
  // forces a VMEXIT
  Bit8u bitmap;
  bx_phy_address pAddr = vm->vmread_bitmap_addr | (encoding >> 3);
  access_read_physical(pAddr, 1, &bitmap);
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 1, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_VMREAD_BITMAP_ACCESS, &bitmap);

  if (bitmap & (1 << (encoding & 7)))
    return true;

  return false;
}
696
// Returns true when VMWRITE executed in VMX non-root operation must cause
// a VMEXIT; false when VMCS shadowing may satisfy it on the shadow VMCS.
bool BX_CPP_AttrRegparmN(1) BX_CPU_C::Vmexit_Vmwrite(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

  // without VMCS shadowing every VMWRITE exits
  if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VMCS_SHADOWING)) return true;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // field encoding with upper 32 bits set always exits
    if (BX_READ_64BIT_REG_HIGH(i->dst())) return true;
  }
#endif
  unsigned encoding = BX_READ_32BIT_REG(i->dst());
  if (encoding > 0x7fff) return true; // only encodings 0..0x7FFF are covered by the bitmap

  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;

  // consult the VMWRITE bitmap - one bit per field encoding; a set bit
  // forces a VMEXIT
  Bit8u bitmap;
  bx_phy_address pAddr = vm->vmwrite_bitmap_addr | (encoding >> 3);
  access_read_physical(pAddr, 1, &bitmap);
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 1, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_VMWRITE_BITMAP_ACCESS, &bitmap);

  if (bitmap & (1 << (encoding & 7)))
    return true;

  return false;
}
723
Virtualization_Exception(Bit64u qualification,Bit64u guest_physical,Bit64u guest_linear)724 void BX_CPU_C::Virtualization_Exception(Bit64u qualification, Bit64u guest_physical, Bit64u guest_linear)
725 {
726 BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
727
728 // A convertible EPT violation causes a virtualization exception if the following all hold:
729 // - CR0.PE is set
730 // - the logical processor is not in the process of delivering an event through the IDT
731 // - the 32 bits at offset 4 in the virtualization-exception information area are all 0
732
733 if (! BX_CPU_THIS_PTR cr0.get_PE() || BX_CPU_THIS_PTR in_event) return;
734
735 VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
736
737 Bit32u magic;
738 access_read_physical(vm->ve_info_addr + 4, 4, &magic);
739 #if BX_SUPPORT_MEMTYPE
740 BxMemtype ve_info_memtype = resolve_memtype(vm->ve_info_addr);
741 #endif
742 BX_NOTIFY_PHY_MEMORY_ACCESS(vm->ve_info_addr + 4, 4, MEMTYPE(ve_info_memtype), BX_READ, 0, (Bit8u*)(&magic));
743 if (magic != 0) return;
744
745 struct ve_info {
746 Bit32u reason; // always VMX_VMEXIT_EPT_VIOLATION
747 Bit32u magic;
748 Bit64u qualification;
749 Bit64u guest_linear_addr;
750 Bit64u guest_physical_addr;
751 Bit16u eptp_index;
752 } ve_info = { VMX_VMEXIT_EPT_VIOLATION, 0xffffffff, qualification, guest_linear, guest_physical, vm->eptp_index };
753
754 access_write_physical(vm->ve_info_addr, 4, &ve_info.reason);
755 BX_NOTIFY_PHY_MEMORY_ACCESS(vm->ve_info_addr, 4, MEMTYPE(ve_info_memtype), BX_WRITE, 0, (Bit8u*)(&ve_info.reason));
756
757 access_write_physical(vm->ve_info_addr + 4, 4, &ve_info.magic);
758 BX_NOTIFY_PHY_MEMORY_ACCESS(vm->ve_info_addr + 4, 4, MEMTYPE(ve_info_memtype), BX_WRITE, 0, (Bit8u*)(&ve_info.magic));
759
760 access_write_physical(vm->ve_info_addr + 8, 8, &ve_info.qualification);
761 BX_NOTIFY_PHY_MEMORY_ACCESS(vm->ve_info_addr + 8, 8, MEMTYPE(ve_info_memtype), BX_WRITE, 0, (Bit8u*)(&ve_info.qualification));
762
763 access_write_physical(vm->ve_info_addr + 16, 8, &ve_info.guest_linear_addr);
764 BX_NOTIFY_PHY_MEMORY_ACCESS(vm->ve_info_addr + 16, 8, MEMTYPE(ve_info_memtype), BX_WRITE, 0, (Bit8u*)(&ve_info.guest_linear_addr));
765
766 access_write_physical(vm->ve_info_addr + 24, 8, &ve_info.guest_physical_addr);
767 BX_NOTIFY_PHY_MEMORY_ACCESS(vm->ve_info_addr + 24, 8, MEMTYPE(ve_info_memtype), BX_WRITE, 0, (Bit8u*)(&ve_info.guest_physical_addr));
768
769 access_write_physical(vm->ve_info_addr + 32, 8, &ve_info.eptp_index);
770 BX_NOTIFY_PHY_MEMORY_ACCESS(vm->ve_info_addr + 32, 8, MEMTYPE(ve_info_memtype), BX_WRITE, 0, (Bit8u*)(&ve_info.eptp_index));
771
772 exception(BX_VE_EXCEPTION, 0);
773 }
774
// Page-Modification Logging: called from the EPT walk; on a dirty-flag
// update records the guest physical address into the PML log buffer, or
// raises a PML-log-full VMEXIT when no free entry is left.
void BX_CPU_C::vmx_page_modification_logging(Bit64u guest_paddr, unsigned dirty_update)
{
  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;

  // pml_index counts down (see decrement below); >= 512 means the log is
  // full - presumably including the unsigned wrap-around after entry 0 was
  // consumed (NOTE(review): relies on pml_index being unsigned - confirm
  // its declared type in the VMCS cache)
  if (vm->pml_index >= 512) {
    Bit32u vmexit_qualification = 0;
    // bit [12]: NMI unblocking due to IRET
    if (BX_CPU_THIS_PTR nmi_unblocking_iret)
      vmexit_qualification |= (1 << 12);

    VMexit(VMX_VMEXIT_PML_LOGFULL, vmexit_qualification);
  }

  if (dirty_update) {
    // append the guest physical address to the log and consume one entry
    Bit64u pAddr = vm->pml_address + 8 * vm->pml_index;
    access_write_physical(pAddr, 8, &guest_paddr);
    BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)), BX_WRITE, BX_VMX_PML_WRITE, (Bit8u*)(&guest_paddr));
    vm->pml_index--;
  }
}
794 #endif
795
796 #endif // BX_SUPPORT_VMX
797