1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * Copyright (c) 2018 Joyent, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD$ 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_bhyve_snapshot.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/smp.h> 40 #include <sys/kernel.h> 41 #include <sys/malloc.h> 42 #include <sys/pcpu.h> 43 #include <sys/proc.h> 44 #include <sys/reg.h> 45 #include <sys/smr.h> 46 #include <sys/sysctl.h> 47 48 #include <vm/vm.h> 49 #include <vm/pmap.h> 50 51 #include <machine/psl.h> 52 #include <machine/cpufunc.h> 53 #include <machine/md_var.h> 54 #include <machine/segments.h> 55 #include <machine/smp.h> 56 #include <machine/specialreg.h> 57 #include <machine/vmparam.h> 58 59 #include <machine/vmm.h> 60 #include <machine/vmm_dev.h> 61 #include <machine/vmm_instruction_emul.h> 62 #include <machine/vmm_snapshot.h> 63 64 #include "vmm_lapic.h" 65 #include "vmm_host.h" 66 #include "vmm_ioport.h" 67 #include "vmm_ktr.h" 68 #include "vmm_stat.h" 69 #include "vatpic.h" 70 #include "vlapic.h" 71 #include "vlapic_priv.h" 72 73 #include "ept.h" 74 #include "vmx_cpufunc.h" 75 #include "vmx.h" 76 #include "vmx_msr.h" 77 #include "x86.h" 78 #include "vmx_controls.h" 79 80 #define PINBASED_CTLS_ONE_SETTING \ 81 (PINBASED_EXTINT_EXITING | \ 82 PINBASED_NMI_EXITING | \ 83 PINBASED_VIRTUAL_NMI) 84 #define PINBASED_CTLS_ZERO_SETTING 0 85 86 #define PROCBASED_CTLS_WINDOW_SETTING \ 87 (PROCBASED_INT_WINDOW_EXITING | \ 88 PROCBASED_NMI_WINDOW_EXITING) 89 90 #define PROCBASED_CTLS_ONE_SETTING \ 91 (PROCBASED_SECONDARY_CONTROLS | \ 92 PROCBASED_MWAIT_EXITING | \ 93 PROCBASED_MONITOR_EXITING | \ 94 PROCBASED_IO_EXITING | \ 95 PROCBASED_MSR_BITMAPS | \ 96 PROCBASED_CTLS_WINDOW_SETTING | \ 97 PROCBASED_CR8_LOAD_EXITING | \ 98 PROCBASED_CR8_STORE_EXITING) 99 #define PROCBASED_CTLS_ZERO_SETTING \ 100 (PROCBASED_CR3_LOAD_EXITING | \ 101 PROCBASED_CR3_STORE_EXITING | \ 102 PROCBASED_IO_BITMAPS) 103 104 #define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT 105 #define PROCBASED_CTLS2_ZERO_SETTING 0 106 107 #define VM_EXIT_CTLS_ONE_SETTING \ 108 (VM_EXIT_SAVE_DEBUG_CONTROLS | \ 109 VM_EXIT_HOST_LMA | \ 110 
VM_EXIT_SAVE_EFER | \ 111 VM_EXIT_LOAD_EFER | \ 112 VM_EXIT_ACKNOWLEDGE_INTERRUPT) 113 114 #define VM_EXIT_CTLS_ZERO_SETTING 0 115 116 #define VM_ENTRY_CTLS_ONE_SETTING \ 117 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \ 118 VM_ENTRY_LOAD_EFER) 119 120 #define VM_ENTRY_CTLS_ZERO_SETTING \ 121 (VM_ENTRY_INTO_SMM | \ 122 VM_ENTRY_DEACTIVATE_DUAL_MONITOR) 123 124 #define HANDLED 1 125 #define UNHANDLED 0 126 127 static MALLOC_DEFINE(M_VMX, "vmx", "vmx"); 128 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic"); 129 130 bool vmx_have_msr_tsc_aux; 131 132 SYSCTL_DECL(_hw_vmm); 133 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 134 NULL); 135 136 int vmxon_enabled[MAXCPU]; 137 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); 138 139 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2; 140 static uint32_t exit_ctls, entry_ctls; 141 142 static uint64_t cr0_ones_mask, cr0_zeros_mask; 143 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD, 144 &cr0_ones_mask, 0, NULL); 145 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD, 146 &cr0_zeros_mask, 0, NULL); 147 148 static uint64_t cr4_ones_mask, cr4_zeros_mask; 149 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD, 150 &cr4_ones_mask, 0, NULL); 151 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD, 152 &cr4_zeros_mask, 0, NULL); 153 154 static int vmx_initialized; 155 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD, 156 &vmx_initialized, 0, "Intel VMX initialized"); 157 158 /* 159 * Optional capabilities 160 */ 161 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, 162 CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 163 NULL); 164 165 static int cap_halt_exit; 166 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0, 167 "HLT triggers a VM-exit"); 168 169 static int cap_pause_exit; 170 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit, 171 0, "PAUSE triggers a VM-exit"); 172 173 static int cap_wbinvd_exit; 174 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, wbinvd_exit, CTLFLAG_RD, &cap_wbinvd_exit, 175 0, "WBINVD triggers a VM-exit"); 176 177 static int cap_rdpid; 178 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, rdpid, CTLFLAG_RD, &cap_rdpid, 0, 179 "Guests are allowed to use RDPID"); 180 181 static int cap_rdtscp; 182 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, rdtscp, CTLFLAG_RD, &cap_rdtscp, 0, 183 "Guests are allowed to use RDTSCP"); 184 185 static int cap_unrestricted_guest; 186 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD, 187 &cap_unrestricted_guest, 0, "Unrestricted guests"); 188 189 static int cap_monitor_trap; 190 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD, 191 &cap_monitor_trap, 0, "Monitor trap flag"); 192 193 static int cap_invpcid; 194 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid, 195 0, "Guests are allowed to use INVPCID"); 196 197 static int tpr_shadowing; 198 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing, CTLFLAG_RD, 199 &tpr_shadowing, 0, "TPR shadowing support"); 200 201 static int virtual_interrupt_delivery; 202 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD, 203 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support"); 204 205 static int posted_interrupts; 206 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD, 207 &posted_interrupts, 0, "APICv posted interrupt support"); 208 209 static int pirvec = -1; 210 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD, 211 
&pirvec, 0, "APICv posted interrupt vector"); 212 213 static struct unrhdr *vpid_unr; 214 static u_int vpid_alloc_failed; 215 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD, 216 &vpid_alloc_failed, 0, NULL); 217 218 int guest_l1d_flush; 219 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD, 220 &guest_l1d_flush, 0, NULL); 221 int guest_l1d_flush_sw; 222 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD, 223 &guest_l1d_flush_sw, 0, NULL); 224 225 static struct msr_entry msr_load_list[1] __aligned(16); 226 227 /* 228 * The definitions of SDT probes for VMX. 229 */ 230 231 SDT_PROBE_DEFINE3(vmm, vmx, exit, entry, 232 "struct vmx *", "int", "struct vm_exit *"); 233 234 SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch, 235 "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *"); 236 237 SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess, 238 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 239 240 SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr, 241 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 242 243 SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr, 244 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t"); 245 246 SDT_PROBE_DEFINE3(vmm, vmx, exit, halt, 247 "struct vmx *", "int", "struct vm_exit *"); 248 249 SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap, 250 "struct vmx *", "int", "struct vm_exit *"); 251 252 SDT_PROBE_DEFINE3(vmm, vmx, exit, pause, 253 "struct vmx *", "int", "struct vm_exit *"); 254 255 SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow, 256 "struct vmx *", "int", "struct vm_exit *"); 257 258 SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt, 259 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 260 261 SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow, 262 "struct vmx *", "int", "struct vm_exit *"); 263 264 SDT_PROBE_DEFINE3(vmm, vmx, exit, inout, 265 "struct vmx *", "int", "struct vm_exit *"); 266 267 SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid, 268 "struct vmx *", "int", "struct vm_exit *"); 269 270 SDT_PROBE_DEFINE5(vmm, vmx, exit, exception, 271 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int"); 272 273 SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault, 274 "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t"); 275 276 SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault, 277 "struct vmx *", "int", "struct vm_exit *", "uint64_t"); 278 279 SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi, 280 "struct vmx *", "int", "struct vm_exit *"); 281 282 SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess, 283 "struct vmx *", "int", "struct vm_exit *"); 284 285 SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite, 286 "struct vmx *", "int", "struct vm_exit *", "struct vlapic *"); 287 288 SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv, 289 "struct vmx *", "int", "struct vm_exit *"); 290 291 SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor, 292 "struct vmx *", "int", "struct vm_exit *"); 293 294 SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait, 295 "struct vmx *", "int", "struct vm_exit *"); 296 297 SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn, 298 "struct vmx *", "int", "struct vm_exit *"); 299 300 SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown, 301 "struct vmx *", "int", "struct vm_exit *", "uint32_t"); 302 303 SDT_PROBE_DEFINE4(vmm, vmx, exit, return, 304 "struct vmx *", "int", "struct vm_exit *", "int"); 305 306 /* 307 * Use the last page below 4GB as the APIC access address. This address is 308 * occupied by the boot firmware so it is guaranteed that it will not conflict 309 * with a page in system memory. 
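 *
 * The address is only consumed when virtual interrupt delivery is enabled:
 * vmx_init() maps it into the guest with vm_map_mmio() and each vCPU's VMCS
 * points at it via VMCS_APIC_ACCESS (see vmx_vcpu_init()).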
310 */ 311 #define APIC_ACCESS_ADDRESS 0xFFFFF000 312 313 static int vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc); 314 static int vmx_getreg(void *vcpui, int reg, uint64_t *retval); 315 static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val); 316 static void vmx_inject_pir(struct vlapic *vlapic); 317 #ifdef BHYVE_SNAPSHOT 318 static int vmx_restore_tsc(void *vcpui, uint64_t now); 319 #endif 320 321 static inline bool 322 host_has_rdpid(void) 323 { 324 return ((cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0); 325 } 326 327 static inline bool 328 host_has_rdtscp(void) 329 { 330 return ((amd_feature & AMDID_RDTSCP) != 0); 331 } 332 333 #ifdef KTR 334 static const char * 335 exit_reason_to_str(int reason) 336 { 337 static char reasonbuf[32]; 338 339 switch (reason) { 340 case EXIT_REASON_EXCEPTION: 341 return "exception"; 342 case EXIT_REASON_EXT_INTR: 343 return "extint"; 344 case EXIT_REASON_TRIPLE_FAULT: 345 return "triplefault"; 346 case EXIT_REASON_INIT: 347 return "init"; 348 case EXIT_REASON_SIPI: 349 return "sipi"; 350 case EXIT_REASON_IO_SMI: 351 return "iosmi"; 352 case EXIT_REASON_SMI: 353 return "smi"; 354 case EXIT_REASON_INTR_WINDOW: 355 return "intrwindow"; 356 case EXIT_REASON_NMI_WINDOW: 357 return "nmiwindow"; 358 case EXIT_REASON_TASK_SWITCH: 359 return "taskswitch"; 360 case EXIT_REASON_CPUID: 361 return "cpuid"; 362 case EXIT_REASON_GETSEC: 363 return "getsec"; 364 case EXIT_REASON_HLT: 365 return "hlt"; 366 case EXIT_REASON_INVD: 367 return "invd"; 368 case EXIT_REASON_INVLPG: 369 return "invlpg"; 370 case EXIT_REASON_RDPMC: 371 return "rdpmc"; 372 case EXIT_REASON_RDTSC: 373 return "rdtsc"; 374 case EXIT_REASON_RSM: 375 return "rsm"; 376 case EXIT_REASON_VMCALL: 377 return "vmcall"; 378 case EXIT_REASON_VMCLEAR: 379 return "vmclear"; 380 case EXIT_REASON_VMLAUNCH: 381 return "vmlaunch"; 382 case EXIT_REASON_VMPTRLD: 383 return "vmptrld"; 384 case EXIT_REASON_VMPTRST: 385 return "vmptrst"; 386 case EXIT_REASON_VMREAD: 387 return "vmread"; 388 case EXIT_REASON_VMRESUME: 389 return "vmresume"; 390 case EXIT_REASON_VMWRITE: 391 return "vmwrite"; 392 case EXIT_REASON_VMXOFF: 393 return "vmxoff"; 394 case EXIT_REASON_VMXON: 395 return "vmxon"; 396 case EXIT_REASON_CR_ACCESS: 397 return "craccess"; 398 case EXIT_REASON_DR_ACCESS: 399 return "draccess"; 400 case EXIT_REASON_INOUT: 401 return "inout"; 402 case EXIT_REASON_RDMSR: 403 return "rdmsr"; 404 case EXIT_REASON_WRMSR: 405 return "wrmsr"; 406 case EXIT_REASON_INVAL_VMCS: 407 return "invalvmcs"; 408 case EXIT_REASON_INVAL_MSR: 409 return "invalmsr"; 410 case EXIT_REASON_MWAIT: 411 return "mwait"; 412 case EXIT_REASON_MTF: 413 return "mtf"; 414 case EXIT_REASON_MONITOR: 415 return "monitor"; 416 case EXIT_REASON_PAUSE: 417 return "pause"; 418 case EXIT_REASON_MCE_DURING_ENTRY: 419 return "mce-during-entry"; 420 case EXIT_REASON_TPR: 421 return "tpr"; 422 case EXIT_REASON_APIC_ACCESS: 423 return "apic-access"; 424 case EXIT_REASON_GDTR_IDTR: 425 return "gdtridtr"; 426 case EXIT_REASON_LDTR_TR: 427 return "ldtrtr"; 428 case EXIT_REASON_EPT_FAULT: 429 return "eptfault"; 430 case EXIT_REASON_EPT_MISCONFIG: 431 return "eptmisconfig"; 432 case EXIT_REASON_INVEPT: 433 return "invept"; 434 case EXIT_REASON_RDTSCP: 435 return "rdtscp"; 436 case EXIT_REASON_VMX_PREEMPT: 437 return "vmxpreempt"; 438 case EXIT_REASON_INVVPID: 439 return "invvpid"; 440 case EXIT_REASON_WBINVD: 441 return "wbinvd"; 442 case EXIT_REASON_XSETBV: 443 return "xsetbv"; 444 case EXIT_REASON_APIC_WRITE: 445 return "apic-write"; 446 
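	/*
	 * Any reason not matched above is formatted numerically into the
	 * static buffer; that is adequate here since this helper is only
	 * built for KTR tracing.
	 */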
default: 447 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason); 448 return (reasonbuf); 449 } 450 } 451 #endif /* KTR */ 452 453 static int 454 vmx_allow_x2apic_msrs(struct vmx *vmx) 455 { 456 int i, error; 457 458 error = 0; 459 460 /* 461 * Allow readonly access to the following x2APIC MSRs from the guest. 462 */ 463 error += guest_msr_ro(vmx, MSR_APIC_ID); 464 error += guest_msr_ro(vmx, MSR_APIC_VERSION); 465 error += guest_msr_ro(vmx, MSR_APIC_LDR); 466 error += guest_msr_ro(vmx, MSR_APIC_SVR); 467 468 for (i = 0; i < 8; i++) 469 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i); 470 471 for (i = 0; i < 8; i++) 472 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i); 473 474 for (i = 0; i < 8; i++) 475 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i); 476 477 error += guest_msr_ro(vmx, MSR_APIC_ESR); 478 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER); 479 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL); 480 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT); 481 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0); 482 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1); 483 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR); 484 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER); 485 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER); 486 error += guest_msr_ro(vmx, MSR_APIC_ICR); 487 488 /* 489 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest. 490 * 491 * These registers get special treatment described in the section 492 * "Virtualizing MSR-Based APIC Accesses". 493 */ 494 error += guest_msr_rw(vmx, MSR_APIC_TPR); 495 error += guest_msr_rw(vmx, MSR_APIC_EOI); 496 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI); 497 498 return (error); 499 } 500 501 u_long 502 vmx_fix_cr0(u_long cr0) 503 { 504 505 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask); 506 } 507 508 u_long 509 vmx_fix_cr4(u_long cr4) 510 { 511 512 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask); 513 } 514 515 static void 516 vpid_free(int vpid) 517 { 518 if (vpid < 0 || vpid > 0xffff) 519 panic("vpid_free: invalid vpid %d", vpid); 520 521 /* 522 * VPIDs [0,VM_MAXCPU] are special and are not allocated from 523 * the unit number allocator. 524 */ 525 526 if (vpid > VM_MAXCPU) 527 free_unr(vpid_unr, vpid); 528 } 529 530 static void 531 vpid_alloc(uint16_t *vpid, int num) 532 { 533 int i, x; 534 535 if (num <= 0 || num > VM_MAXCPU) 536 panic("invalid number of vpids requested: %d", num); 537 538 /* 539 * If the "enable vpid" execution control is not enabled then the 540 * VPID is required to be 0 for all vcpus. 541 */ 542 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) { 543 for (i = 0; i < num; i++) 544 vpid[i] = 0; 545 return; 546 } 547 548 /* 549 * Allocate a unique VPID for each vcpu from the unit number allocator. 550 */ 551 for (i = 0; i < num; i++) { 552 x = alloc_unr(vpid_unr); 553 if (x == -1) 554 break; 555 else 556 vpid[i] = x; 557 } 558 559 if (i < num) { 560 atomic_add_int(&vpid_alloc_failed, 1); 561 562 /* 563 * If the unit number allocator does not have enough unique 564 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range. 565 * 566 * These VPIDs are not be unique across VMs but this does not 567 * affect correctness because the combined mappings are also 568 * tagged with the EP4TA which is unique for each VM. 569 * 570 * It is still sub-optimal because the invvpid will invalidate 571 * combined mappings for a particular VPID across all EP4TAs. 
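 *
 * The fallback below returns any VPIDs that were obtained before the
 * shortfall was detected and then assigns vpid[i] = i + 1, i.e. each
 * vcpu simply takes its slot in the shared [1,VM_MAXCPU] range.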
572 */ 573 while (i-- > 0) 574 vpid_free(vpid[i]); 575 576 for (i = 0; i < num; i++) 577 vpid[i] = i + 1; 578 } 579 } 580 581 static void 582 vpid_init(void) 583 { 584 /* 585 * VPID 0 is required when the "enable VPID" execution control is 586 * disabled. 587 * 588 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the 589 * unit number allocator does not have sufficient unique VPIDs to 590 * satisfy the allocation. 591 * 592 * The remaining VPIDs are managed by the unit number allocator. 593 */ 594 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL); 595 } 596 597 static void 598 vmx_disable(void *arg __unused) 599 { 600 struct invvpid_desc invvpid_desc = { 0 }; 601 struct invept_desc invept_desc = { 0 }; 602 603 if (vmxon_enabled[curcpu]) { 604 /* 605 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b. 606 * 607 * VMXON or VMXOFF are not required to invalidate any TLB 608 * caching structures. This prevents potential retention of 609 * cached information in the TLB between distinct VMX episodes. 610 */ 611 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc); 612 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc); 613 vmxoff(); 614 } 615 load_cr4(rcr4() & ~CR4_VMXE); 616 } 617 618 static int 619 vmx_modcleanup(void) 620 { 621 622 if (pirvec >= 0) 623 lapic_ipi_free(pirvec); 624 625 if (vpid_unr != NULL) { 626 delete_unrhdr(vpid_unr); 627 vpid_unr = NULL; 628 } 629 630 if (nmi_flush_l1d_sw == 1) 631 nmi_flush_l1d_sw = 0; 632 633 smp_rendezvous(NULL, vmx_disable, NULL, NULL); 634 635 return (0); 636 } 637 638 static void 639 vmx_enable(void *arg __unused) 640 { 641 int error; 642 uint64_t feature_control; 643 644 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 645 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 || 646 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 647 wrmsr(MSR_IA32_FEATURE_CONTROL, 648 feature_control | IA32_FEATURE_CONTROL_VMX_EN | 649 IA32_FEATURE_CONTROL_LOCK); 650 } 651 652 load_cr4(rcr4() | CR4_VMXE); 653 654 *(uint32_t *)vmxon_region[curcpu] = vmx_revision(); 655 error = vmxon(vmxon_region[curcpu]); 656 if (error == 0) 657 vmxon_enabled[curcpu] = 1; 658 } 659 660 static void 661 vmx_modresume(void) 662 { 663 664 if (vmxon_enabled[curcpu]) 665 vmxon(vmxon_region[curcpu]); 666 } 667 668 static int 669 vmx_modinit(int ipinum) 670 { 671 int error; 672 uint64_t basic, fixed0, fixed1, feature_control; 673 uint32_t tmp, procbased2_vid_bits; 674 675 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */ 676 if (!(cpu_feature2 & CPUID2_VMX)) { 677 printf("vmx_modinit: processor does not support VMX " 678 "operation\n"); 679 return (ENXIO); 680 } 681 682 /* 683 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits 684 * are set (bits 0 and 2 respectively). 
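 *
 * If the MSR is already locked with the VMX enable bit clear then VMX was
 * disabled by the firmware and cannot be enabled here.  If the MSR is
 * still unlocked, vmx_enable() will set and lock it on each CPU.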
685 */ 686 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL); 687 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 && 688 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) { 689 printf("vmx_modinit: VMX operation disabled by BIOS\n"); 690 return (ENXIO); 691 } 692 693 /* 694 * Verify capabilities MSR_VMX_BASIC: 695 * - bit 54 indicates support for INS/OUTS decoding 696 */ 697 basic = rdmsr(MSR_VMX_BASIC); 698 if ((basic & (1UL << 54)) == 0) { 699 printf("vmx_modinit: processor does not support desired basic " 700 "capabilities\n"); 701 return (EINVAL); 702 } 703 704 /* Check support for primary processor-based VM-execution controls */ 705 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 706 MSR_VMX_TRUE_PROCBASED_CTLS, 707 PROCBASED_CTLS_ONE_SETTING, 708 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls); 709 if (error) { 710 printf("vmx_modinit: processor does not support desired " 711 "primary processor-based controls\n"); 712 return (error); 713 } 714 715 /* Clear the processor-based ctl bits that are set on demand */ 716 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING; 717 718 /* Check support for secondary processor-based VM-execution controls */ 719 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 720 MSR_VMX_PROCBASED_CTLS2, 721 PROCBASED_CTLS2_ONE_SETTING, 722 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2); 723 if (error) { 724 printf("vmx_modinit: processor does not support desired " 725 "secondary processor-based controls\n"); 726 return (error); 727 } 728 729 /* Check support for VPID */ 730 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 731 PROCBASED2_ENABLE_VPID, 0, &tmp); 732 if (error == 0) 733 procbased_ctls2 |= PROCBASED2_ENABLE_VPID; 734 735 /* Check support for pin-based VM-execution controls */ 736 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 737 MSR_VMX_TRUE_PINBASED_CTLS, 738 PINBASED_CTLS_ONE_SETTING, 739 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls); 740 if (error) { 741 printf("vmx_modinit: processor does not support desired " 742 "pin-based controls\n"); 743 return (error); 744 } 745 746 /* Check support for VM-exit controls */ 747 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 748 VM_EXIT_CTLS_ONE_SETTING, 749 VM_EXIT_CTLS_ZERO_SETTING, 750 &exit_ctls); 751 if (error) { 752 printf("vmx_modinit: processor does not support desired " 753 "exit controls\n"); 754 return (error); 755 } 756 757 /* Check support for VM-entry controls */ 758 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 759 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, 760 &entry_ctls); 761 if (error) { 762 printf("vmx_modinit: processor does not support desired " 763 "entry controls\n"); 764 return (error); 765 } 766 767 /* 768 * Check support for optional features by testing them 769 * as individual bits 770 */ 771 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 772 MSR_VMX_TRUE_PROCBASED_CTLS, 773 PROCBASED_HLT_EXITING, 0, 774 &tmp) == 0); 775 776 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 777 MSR_VMX_PROCBASED_CTLS, 778 PROCBASED_MTF, 0, 779 &tmp) == 0); 780 781 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 782 MSR_VMX_TRUE_PROCBASED_CTLS, 783 PROCBASED_PAUSE_EXITING, 0, 784 &tmp) == 0); 785 786 cap_wbinvd_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 787 MSR_VMX_PROCBASED_CTLS2, 788 PROCBASED2_WBINVD_EXITING, 789 0, 790 &tmp) == 0); 791 792 /* 793 * Check support for RDPID and/or RDTSCP. 
794 * 795 * Support a pass-through-based implementation of these via the 796 * "enable RDTSCP" VM-execution control and the "RDTSC exiting" 797 * VM-execution control. 798 * 799 * The "enable RDTSCP" VM-execution control applies to both RDPID 800 * and RDTSCP (see SDM volume 3, section 25.3, "Changes to 801 * Instruction Behavior in VMX Non-root operation"); this is why 802 * only this VM-execution control needs to be enabled in order to 803 * enable passing through whichever of RDPID and/or RDTSCP are 804 * supported by the host. 805 * 806 * The "RDTSC exiting" VM-execution control applies to both RDTSC 807 * and RDTSCP (again, per SDM volume 3, section 25.3), and is 808 * already set up for RDTSC and RDTSCP pass-through by the current 809 * implementation of RDTSC. 810 * 811 * Although RDPID and RDTSCP are optional capabilities, since there 812 * does not currently seem to be a use case for enabling/disabling 813 * these via libvmmapi, choose not to support this and, instead, 814 * just statically always enable or always disable this support 815 * across all vCPUs on all VMs. (Note that there may be some 816 * complications to providing this functionality, e.g., the MSR 817 * bitmap is currently per-VM rather than per-vCPU while the 818 * capability API wants to be able to control capabilities on a 819 * per-vCPU basis). 820 */ 821 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 822 MSR_VMX_PROCBASED_CTLS2, 823 PROCBASED2_ENABLE_RDTSCP, 0, &tmp); 824 cap_rdpid = error == 0 && host_has_rdpid(); 825 cap_rdtscp = error == 0 && host_has_rdtscp(); 826 if (cap_rdpid || cap_rdtscp) { 827 procbased_ctls2 |= PROCBASED2_ENABLE_RDTSCP; 828 vmx_have_msr_tsc_aux = true; 829 } 830 831 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 832 MSR_VMX_PROCBASED_CTLS2, 833 PROCBASED2_UNRESTRICTED_GUEST, 0, 834 &tmp) == 0); 835 836 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 837 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0, 838 &tmp) == 0); 839 840 /* 841 * Check support for TPR shadow. 842 */ 843 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 844 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0, 845 &tmp); 846 if (error == 0) { 847 tpr_shadowing = 1; 848 TUNABLE_INT_FETCH("hw.vmm.vmx.use_tpr_shadowing", 849 &tpr_shadowing); 850 } 851 852 if (tpr_shadowing) { 853 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 854 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING; 855 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING; 856 } 857 858 /* 859 * Check support for virtual interrupt delivery. 860 */ 861 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES | 862 PROCBASED2_VIRTUALIZE_X2APIC_MODE | 863 PROCBASED2_APIC_REGISTER_VIRTUALIZATION | 864 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY); 865 866 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 867 procbased2_vid_bits, 0, &tmp); 868 if (error == 0 && tpr_shadowing) { 869 virtual_interrupt_delivery = 1; 870 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid", 871 &virtual_interrupt_delivery); 872 } 873 874 if (virtual_interrupt_delivery) { 875 procbased_ctls |= PROCBASED_USE_TPR_SHADOW; 876 procbased_ctls2 |= procbased2_vid_bits; 877 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE; 878 879 /* 880 * Check for Posted Interrupts only if Virtual Interrupt 881 * Delivery is enabled. 882 */ 883 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 884 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0, 885 &tmp); 886 if (error == 0) { 887 pirvec = lapic_ipi_alloc(pti ? 
&IDTVEC(justreturn1_pti) : 888 &IDTVEC(justreturn)); 889 if (pirvec < 0) { 890 if (bootverbose) { 891 printf("vmx_modinit: unable to " 892 "allocate posted interrupt " 893 "vector\n"); 894 } 895 } else { 896 posted_interrupts = 1; 897 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir", 898 &posted_interrupts); 899 } 900 } 901 } 902 903 if (posted_interrupts) 904 pinbased_ctls |= PINBASED_POSTED_INTERRUPT; 905 906 /* Initialize EPT */ 907 error = ept_init(ipinum); 908 if (error) { 909 printf("vmx_modinit: ept initialization failed (%d)\n", error); 910 return (error); 911 } 912 913 guest_l1d_flush = (cpu_ia32_arch_caps & 914 IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0; 915 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush); 916 917 /* 918 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when 919 * available. Otherwise fall back to the software flush 920 * method which loads enough data from the kernel text to 921 * flush existing L1D content, both on VMX entry and on NMI 922 * return. 923 */ 924 if (guest_l1d_flush) { 925 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) { 926 guest_l1d_flush_sw = 1; 927 TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw", 928 &guest_l1d_flush_sw); 929 } 930 if (guest_l1d_flush_sw) { 931 if (nmi_flush_l1d_sw <= 1) 932 nmi_flush_l1d_sw = 1; 933 } else { 934 msr_load_list[0].index = MSR_IA32_FLUSH_CMD; 935 msr_load_list[0].val = IA32_FLUSH_CMD_L1D; 936 } 937 } 938 939 /* 940 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1 941 */ 942 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0); 943 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1); 944 cr0_ones_mask = fixed0 & fixed1; 945 cr0_zeros_mask = ~fixed0 & ~fixed1; 946 947 /* 948 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation 949 * if unrestricted guest execution is allowed. 950 */ 951 if (cap_unrestricted_guest) 952 cr0_ones_mask &= ~(CR0_PG | CR0_PE); 953 954 /* 955 * Do not allow the guest to set CR0_NW or CR0_CD. 
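 *
 * These bits are forced into cr0_zeros_mask in addition to what the CPU
 * reports as fixed: per the SDM, a CR0 bit that reads as 1 in both
 * MSR_VMX_CR0_FIXED0 and MSR_VMX_CR0_FIXED1 must be 1 while in VMX
 * operation, and a bit that reads as 0 in both must be 0, which is what
 * the fixed0/fixed1 arithmetic above captures.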
956 */ 957 cr0_zeros_mask |= (CR0_NW | CR0_CD); 958 959 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0); 960 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1); 961 cr4_ones_mask = fixed0 & fixed1; 962 cr4_zeros_mask = ~fixed0 & ~fixed1; 963 964 vpid_init(); 965 966 vmx_msr_init(); 967 968 /* enable VMX operation */ 969 smp_rendezvous(NULL, vmx_enable, NULL, NULL); 970 971 vmx_initialized = 1; 972 973 return (0); 974 } 975 976 static void 977 vmx_trigger_hostintr(int vector) 978 { 979 uintptr_t func; 980 struct gate_descriptor *gd; 981 982 gd = &idt[vector]; 983 984 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: " 985 "invalid vector %d", vector)); 986 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present", 987 vector)); 988 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d " 989 "has invalid type %d", vector, gd->gd_type)); 990 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d " 991 "has invalid dpl %d", vector, gd->gd_dpl)); 992 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor " 993 "for vector %d has invalid selector %d", vector, gd->gd_selector)); 994 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid " 995 "IST %d", vector, gd->gd_ist)); 996 997 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset); 998 vmx_call_isr(func); 999 } 1000 1001 static int 1002 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial) 1003 { 1004 int error, mask_ident, shadow_ident; 1005 uint64_t mask_value; 1006 1007 if (which != 0 && which != 4) 1008 panic("vmx_setup_cr_shadow: unknown cr%d", which); 1009 1010 if (which == 0) { 1011 mask_ident = VMCS_CR0_MASK; 1012 mask_value = cr0_ones_mask | cr0_zeros_mask; 1013 shadow_ident = VMCS_CR0_SHADOW; 1014 } else { 1015 mask_ident = VMCS_CR4_MASK; 1016 mask_value = cr4_ones_mask | cr4_zeros_mask; 1017 shadow_ident = VMCS_CR4_SHADOW; 1018 } 1019 1020 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value); 1021 if (error) 1022 return (error); 1023 1024 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial); 1025 if (error) 1026 return (error); 1027 1028 return (0); 1029 } 1030 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init)) 1031 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init)) 1032 1033 static void * 1034 vmx_init(struct vm *vm, pmap_t pmap) 1035 { 1036 int error; 1037 struct vmx *vmx; 1038 uint16_t maxcpus = vm_get_maxcpus(vm); 1039 1040 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO); 1041 vmx->vm = vm; 1042 1043 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pmltop)); 1044 1045 /* 1046 * Clean up EPTP-tagged guest physical and combined mappings 1047 * 1048 * VMX transitions are not required to invalidate any guest physical 1049 * mappings. So, it may be possible for stale guest physical mappings 1050 * to be present in the processor TLBs. 1051 * 1052 * Combined mappings for this EP4TA are also invalidated for all VPIDs. 1053 */ 1054 ept_invalidate_mappings(vmx->eptp); 1055 1056 vmx->msr_bitmap = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, 1057 M_WAITOK | M_ZERO); 1058 msr_bitmap_initialize(vmx->msr_bitmap); 1059 1060 /* 1061 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE. 1062 * The guest FSBASE and GSBASE are saved and restored during 1063 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are 1064 * always restored from the vmcs host state area on vm-exit. 
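 * Host state fields that are per-cpu (the GS base, TR base and GDTR base)
 * are refreshed in vmx_set_pcpu_defaults() whenever a vcpu moves to a
 * different host cpu.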
1065 * 1066 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in 1067 * how they are saved/restored so can be directly accessed by the 1068 * guest. 1069 * 1070 * MSR_EFER is saved and restored in the guest VMCS area on a 1071 * VM exit and entry respectively. It is also restored from the 1072 * host VMCS area on a VM exit. 1073 * 1074 * The TSC MSR is exposed read-only. Writes are disallowed as 1075 * that will impact the host TSC. If the guest does a write 1076 * the "use TSC offsetting" execution control is enabled and the 1077 * difference between the host TSC and the guest TSC is written 1078 * into the TSC offset in the VMCS. 1079 * 1080 * Guest TSC_AUX support is enabled if any of guest RDPID and/or 1081 * guest RDTSCP support are enabled (since, as per Table 2-2 in SDM 1082 * volume 4, TSC_AUX is supported if any of RDPID and/or RDTSCP are 1083 * supported). If guest TSC_AUX support is enabled, TSC_AUX is 1084 * exposed read-only so that the VMM can do one fewer MSR read per 1085 * exit than if this register were exposed read-write; the guest 1086 * restore value can be updated during guest writes (expected to be 1087 * rare) instead of during all exits (common). 1088 */ 1089 if (guest_msr_rw(vmx, MSR_GSBASE) || 1090 guest_msr_rw(vmx, MSR_FSBASE) || 1091 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) || 1092 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) || 1093 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) || 1094 guest_msr_rw(vmx, MSR_EFER) || 1095 guest_msr_ro(vmx, MSR_TSC) || 1096 ((cap_rdpid || cap_rdtscp) && guest_msr_ro(vmx, MSR_TSC_AUX))) 1097 panic("vmx_init: error setting guest msr access"); 1098 1099 vpid_alloc(vmx->vpids, maxcpus); 1100 1101 if (virtual_interrupt_delivery) { 1102 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE, 1103 APIC_ACCESS_ADDRESS); 1104 /* XXX this should really return an error to the caller */ 1105 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error)); 1106 } 1107 1108 vmx->pmap = pmap; 1109 return (vmx); 1110 } 1111 1112 static void * 1113 vmx_vcpu_init(void *vmi, int vcpuid) 1114 { 1115 struct vmx *vmx = vmi; 1116 struct vmcs *vmcs; 1117 struct vmx_vcpu *vcpu; 1118 uint32_t exc_bitmap; 1119 int error; 1120 1121 vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO); 1122 vcpu->vmx = vmx; 1123 vcpu->vcpuid = vcpuid; 1124 vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX, 1125 M_WAITOK | M_ZERO); 1126 vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX, 1127 M_WAITOK | M_ZERO); 1128 vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX, 1129 M_WAITOK | M_ZERO); 1130 1131 vmcs = vcpu->vmcs; 1132 vmcs->identifier = vmx_revision(); 1133 error = vmclear(vmcs); 1134 if (error != 0) { 1135 panic("vmx_init: vmclear error %d on vcpu %d\n", 1136 error, vcpuid); 1137 } 1138 1139 vmx_msr_guest_init(vmx, vcpu); 1140 1141 error = vmcs_init(vmcs); 1142 KASSERT(error == 0, ("vmcs_init error %d", error)); 1143 1144 VMPTRLD(vmcs); 1145 error = 0; 1146 error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx); 1147 error += vmwrite(VMCS_EPTP, vmx->eptp); 1148 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls); 1149 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls); 1150 if (vcpu_trap_wbinvd(vmx->vm, vcpuid)) { 1151 KASSERT(cap_wbinvd_exit, ("WBINVD trap not available")); 1152 procbased_ctls2 |= PROCBASED2_WBINVD_EXITING; 1153 } 1154 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2); 1155 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls); 1156 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls); 1157 error += 
vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap)); 1158 error += vmwrite(VMCS_VPID, vmx->vpids[vcpuid]); 1159 1160 if (guest_l1d_flush && !guest_l1d_flush_sw) { 1161 vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract( 1162 (vm_offset_t)&msr_load_list[0])); 1163 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT, 1164 nitems(msr_load_list)); 1165 vmcs_write(VMCS_EXIT_MSR_STORE, 0); 1166 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0); 1167 } 1168 1169 /* exception bitmap */ 1170 if (vcpu_trace_exceptions(vmx->vm, vcpuid)) 1171 exc_bitmap = 0xffffffff; 1172 else 1173 exc_bitmap = 1 << IDT_MC; 1174 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap); 1175 1176 vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1; 1177 error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1); 1178 1179 if (tpr_shadowing) { 1180 error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page)); 1181 } 1182 1183 if (virtual_interrupt_delivery) { 1184 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS); 1185 error += vmwrite(VMCS_EOI_EXIT0, 0); 1186 error += vmwrite(VMCS_EOI_EXIT1, 0); 1187 error += vmwrite(VMCS_EOI_EXIT2, 0); 1188 error += vmwrite(VMCS_EOI_EXIT3, 0); 1189 } 1190 if (posted_interrupts) { 1191 error += vmwrite(VMCS_PIR_VECTOR, pirvec); 1192 error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc)); 1193 } 1194 VMCLEAR(vmcs); 1195 KASSERT(error == 0, ("vmx_init: error customizing the vmcs")); 1196 1197 vcpu->cap.set = 0; 1198 vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0; 1199 vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0; 1200 vcpu->cap.proc_ctls = procbased_ctls; 1201 vcpu->cap.proc_ctls2 = procbased_ctls2; 1202 vcpu->cap.exc_bitmap = exc_bitmap; 1203 1204 vcpu->state.nextrip = ~0; 1205 vcpu->state.lastcpu = NOCPU; 1206 vcpu->state.vpid = vmx->vpids[vcpuid]; 1207 1208 /* 1209 * Set up the CR0/4 shadows, and init the read shadow 1210 * to the power-on register value from the Intel Sys Arch. 1211 * CR0 - 0x60000010 1212 * CR4 - 0 1213 */ 1214 error = vmx_setup_cr0_shadow(vmcs, 0x60000010); 1215 if (error != 0) 1216 panic("vmx_setup_cr0_shadow %d", error); 1217 1218 error = vmx_setup_cr4_shadow(vmcs, 0); 1219 if (error != 0) 1220 panic("vmx_setup_cr4_shadow %d", error); 1221 1222 vcpu->ctx.pmap = vmx->pmap; 1223 1224 return (vcpu); 1225 } 1226 1227 static int 1228 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx) 1229 { 1230 int handled; 1231 1232 handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax, 1233 (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx, 1234 (uint64_t *)&vmxctx->guest_rdx); 1235 return (handled); 1236 } 1237 1238 static __inline void 1239 vmx_run_trace(struct vmx_vcpu *vcpu) 1240 { 1241 #ifdef KTR 1242 VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "Resume execution at %#lx", 1243 vmcs_guest_rip()); 1244 #endif 1245 } 1246 1247 static __inline void 1248 vmx_exit_trace(struct vmx_vcpu *vcpu, uint64_t rip, uint32_t exit_reason, 1249 int handled) 1250 { 1251 #ifdef KTR 1252 VCPU_CTR3(vcpu->vmx->vm, vcpu->vcpuid, "%s %s vmexit at 0x%0lx", 1253 handled ? 
"handled" : "unhandled", 1254 exit_reason_to_str(exit_reason), rip); 1255 #endif 1256 } 1257 1258 static __inline void 1259 vmx_astpending_trace(struct vmx_vcpu *vcpu, uint64_t rip) 1260 { 1261 #ifdef KTR 1262 VCPU_CTR1(vcpu->vmx->vm, vcpu->vcpuid, "astpending vmexit at 0x%0lx", 1263 rip); 1264 #endif 1265 } 1266 1267 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved"); 1268 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done"); 1269 1270 /* 1271 * Invalidate guest mappings identified by its vpid from the TLB. 1272 */ 1273 static __inline void 1274 vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running) 1275 { 1276 struct vmxstate *vmxstate; 1277 struct invvpid_desc invvpid_desc; 1278 1279 vmxstate = &vcpu->state; 1280 if (vmxstate->vpid == 0) 1281 return; 1282 1283 if (!running) { 1284 /* 1285 * Set the 'lastcpu' to an invalid host cpu. 1286 * 1287 * This will invalidate TLB entries tagged with the vcpu's 1288 * vpid the next time it runs via vmx_set_pcpu_defaults(). 1289 */ 1290 vmxstate->lastcpu = NOCPU; 1291 return; 1292 } 1293 1294 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside " 1295 "critical section", __func__, vcpu->vcpuid)); 1296 1297 /* 1298 * Invalidate all mappings tagged with 'vpid' 1299 * 1300 * We do this because this vcpu was executing on a different host 1301 * cpu when it last ran. We do not track whether it invalidated 1302 * mappings associated with its 'vpid' during that run. So we must 1303 * assume that the mappings associated with 'vpid' on 'curcpu' are 1304 * stale and invalidate them. 1305 * 1306 * Note that we incur this penalty only when the scheduler chooses to 1307 * move the thread associated with this vcpu between host cpus. 1308 * 1309 * Note also that this will invalidate mappings tagged with 'vpid' 1310 * for "all" EP4TAs. 1311 */ 1312 if (atomic_load_long(&pmap->pm_eptgen) == vmx->eptgen[curcpu]) { 1313 invvpid_desc._res1 = 0; 1314 invvpid_desc._res2 = 0; 1315 invvpid_desc.vpid = vmxstate->vpid; 1316 invvpid_desc.linear_addr = 0; 1317 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc); 1318 vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_INVVPID_DONE, 1); 1319 } else { 1320 /* 1321 * The invvpid can be skipped if an invept is going to 1322 * be performed before entering the guest. The invept 1323 * will invalidate combined mappings tagged with 1324 * 'vmx->eptp' for all vpids. 1325 */ 1326 vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_INVVPID_SAVED, 1); 1327 } 1328 } 1329 1330 static void 1331 vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap) 1332 { 1333 struct vmxstate *vmxstate; 1334 1335 vmxstate = &vcpu->state; 1336 if (vmxstate->lastcpu == curcpu) 1337 return; 1338 1339 vmxstate->lastcpu = curcpu; 1340 1341 vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_MIGRATIONS, 1); 1342 1343 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase()); 1344 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase()); 1345 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase()); 1346 vmx_invvpid(vmx, vcpu, pmap, 1); 1347 } 1348 1349 /* 1350 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set. 
1351 */ 1352 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0); 1353 1354 static void __inline 1355 vmx_set_int_window_exiting(struct vmx_vcpu *vcpu) 1356 { 1357 1358 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) { 1359 vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING; 1360 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 1361 VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, 1362 "Enabling interrupt window exiting"); 1363 } 1364 } 1365 1366 static void __inline 1367 vmx_clear_int_window_exiting(struct vmx_vcpu *vcpu) 1368 { 1369 1370 KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0, 1371 ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls)); 1372 vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING; 1373 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 1374 VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, 1375 "Disabling interrupt window exiting"); 1376 } 1377 1378 static void __inline 1379 vmx_set_nmi_window_exiting(struct vmx_vcpu *vcpu) 1380 { 1381 1382 if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) { 1383 vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING; 1384 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 1385 VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, 1386 "Enabling NMI window exiting"); 1387 } 1388 } 1389 1390 static void __inline 1391 vmx_clear_nmi_window_exiting(struct vmx_vcpu *vcpu) 1392 { 1393 1394 KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0, 1395 ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls)); 1396 vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING; 1397 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 1398 VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Disabling NMI window exiting"); 1399 } 1400 1401 int 1402 vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t offset) 1403 { 1404 int error; 1405 1406 if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) { 1407 vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET; 1408 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls); 1409 VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Enabling TSC offsetting"); 1410 } 1411 1412 error = vmwrite(VMCS_TSC_OFFSET, offset); 1413 #ifdef BHYVE_SNAPSHOT 1414 if (error == 0) 1415 error = vm_set_tsc_offset(vmx->vm, vcpu->vcpuid, offset); 1416 #endif 1417 return (error); 1418 } 1419 1420 #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \ 1421 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1422 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \ 1423 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING) 1424 1425 static void 1426 vmx_inject_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu) 1427 { 1428 uint32_t gi __diagused, info; 1429 1430 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1431 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest " 1432 "interruptibility-state %#x", gi)); 1433 1434 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1435 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid " 1436 "VM-entry interruption information %#x", info)); 1437 1438 /* 1439 * Inject the virtual NMI. The vector must be the NMI IDT entry 1440 * or the VMCS entry check will fail. 
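 *
 * The VM-entry interruption-information field encodes the vector in
 * bits 7:0, the event type in bits 10:8 and the valid flag in bit 31;
 * IDT_NMI, VMCS_INTR_T_NMI and VMCS_INTR_VALID below supply exactly
 * those pieces.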
1441 */ 1442 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID; 1443 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1444 1445 VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Injecting vNMI"); 1446 1447 /* Clear the request */ 1448 vm_nmi_clear(vmx->vm, vcpu->vcpuid); 1449 } 1450 1451 static void 1452 vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu, 1453 struct vlapic *vlapic, uint64_t guestrip) 1454 { 1455 int vector, need_nmi_exiting, extint_pending; 1456 uint64_t rflags, entryinfo; 1457 uint32_t gi, info; 1458 1459 if (vcpu->state.nextrip != guestrip) { 1460 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1461 if (gi & HWINTR_BLOCKING) { 1462 VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking " 1463 "cleared due to rip change: %#lx/%#lx", 1464 vcpu->state.nextrip, guestrip); 1465 gi &= ~HWINTR_BLOCKING; 1466 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1467 } 1468 } 1469 1470 if (vm_entry_intinfo(vmx->vm, vcpu->vcpuid, &entryinfo)) { 1471 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry " 1472 "intinfo is not valid: %#lx", __func__, entryinfo)); 1473 1474 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1475 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject " 1476 "pending exception: %#lx/%#x", __func__, entryinfo, info)); 1477 1478 info = entryinfo; 1479 vector = info & 0xff; 1480 if (vector == IDT_BP || vector == IDT_OF) { 1481 /* 1482 * VT-x requires #BP and #OF to be injected as software 1483 * exceptions. 1484 */ 1485 info &= ~VMCS_INTR_T_MASK; 1486 info |= VMCS_INTR_T_SWEXCEPTION; 1487 } 1488 1489 if (info & VMCS_INTR_DEL_ERRCODE) 1490 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32); 1491 1492 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1493 } 1494 1495 if (vm_nmi_pending(vmx->vm, vcpu->vcpuid)) { 1496 /* 1497 * If there are no conditions blocking NMI injection then 1498 * inject it directly here otherwise enable "NMI window 1499 * exiting" to inject it as soon as we can. 1500 * 1501 * We also check for STI_BLOCKING because some implementations 1502 * don't allow NMI injection in this case. If we are running 1503 * on a processor that doesn't have this restriction it will 1504 * immediately exit and the NMI will be injected in the 1505 * "NMI window exiting" handler. 1506 */ 1507 need_nmi_exiting = 1; 1508 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1509 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) { 1510 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1511 if ((info & VMCS_INTR_VALID) == 0) { 1512 vmx_inject_nmi(vmx, vcpu); 1513 need_nmi_exiting = 0; 1514 } else { 1515 VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Cannot " 1516 "inject NMI due to VM-entry intr info %#x", 1517 info); 1518 } 1519 } else { 1520 VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Cannot inject NMI " 1521 "due to Guest Interruptibility-state %#x", gi); 1522 } 1523 1524 if (need_nmi_exiting) 1525 vmx_set_nmi_window_exiting(vcpu); 1526 } 1527 1528 extint_pending = vm_extint_pending(vmx->vm, vcpu->vcpuid); 1529 1530 if (!extint_pending && virtual_interrupt_delivery) { 1531 vmx_inject_pir(vlapic); 1532 return; 1533 } 1534 1535 /* 1536 * If interrupt-window exiting is already in effect then don't bother 1537 * checking for pending interrupts. This is just an optimization and 1538 * not needed for correctness. 
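 *
 * With the window-exiting control already set, the pending vector will
 * simply be reconsidered on a later pass through this function, once the
 * "interrupt window" VM-exit fires and the guest can accept the event.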
1539 */ 1540 if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) { 1541 VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Skip interrupt injection " 1542 "due to pending int_window_exiting"); 1543 return; 1544 } 1545 1546 if (!extint_pending) { 1547 /* Ask the local apic for a vector to inject */ 1548 if (!vlapic_pending_intr(vlapic, &vector)) 1549 return; 1550 1551 /* 1552 * From the Intel SDM, Volume 3, Section "Maskable 1553 * Hardware Interrupts": 1554 * - maskable interrupt vectors [16,255] can be delivered 1555 * through the local APIC. 1556 */ 1557 KASSERT(vector >= 16 && vector <= 255, 1558 ("invalid vector %d from local APIC", vector)); 1559 } else { 1560 /* Ask the legacy pic for a vector to inject */ 1561 vatpic_pending_intr(vmx->vm, &vector); 1562 1563 /* 1564 * From the Intel SDM, Volume 3, Section "Maskable 1565 * Hardware Interrupts": 1566 * - maskable interrupt vectors [0,255] can be delivered 1567 * through the INTR pin. 1568 */ 1569 KASSERT(vector >= 0 && vector <= 255, 1570 ("invalid vector %d from INTR", vector)); 1571 } 1572 1573 /* Check RFLAGS.IF and the interruptibility state of the guest */ 1574 rflags = vmcs_read(VMCS_GUEST_RFLAGS); 1575 if ((rflags & PSL_I) == 0) { 1576 VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due " 1577 "to rflags %#lx", vector, rflags); 1578 goto cantinject; 1579 } 1580 1581 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1582 if (gi & HWINTR_BLOCKING) { 1583 VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due " 1584 "to Guest Interruptibility-state %#x", vector, gi); 1585 goto cantinject; 1586 } 1587 1588 info = vmcs_read(VMCS_ENTRY_INTR_INFO); 1589 if (info & VMCS_INTR_VALID) { 1590 /* 1591 * This is expected and could happen for multiple reasons: 1592 * - A vectoring VM-entry was aborted due to astpending 1593 * - A VM-exit happened during event injection. 1594 * - An exception was injected above. 1595 * - An NMI was injected above or after "NMI window exiting" 1596 */ 1597 VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due " 1598 "to VM-entry intr info %#x", vector, info); 1599 goto cantinject; 1600 } 1601 1602 /* Inject the interrupt */ 1603 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID; 1604 info |= vector; 1605 vmcs_write(VMCS_ENTRY_INTR_INFO, info); 1606 1607 if (!extint_pending) { 1608 /* Update the Local APIC ISR */ 1609 vlapic_intr_accepted(vlapic, vector); 1610 } else { 1611 vm_extint_clear(vmx->vm, vcpu->vcpuid); 1612 vatpic_intr_accepted(vmx->vm, vector); 1613 1614 /* 1615 * After we accepted the current ExtINT the PIC may 1616 * have posted another one. If that is the case, set 1617 * the Interrupt Window Exiting execution control so 1618 * we can inject that one too. 1619 * 1620 * Also, interrupt window exiting allows us to inject any 1621 * pending APIC vector that was preempted by the ExtINT 1622 * as soon as possible. This applies both for the software 1623 * emulated vlapic and the hardware assisted virtual APIC. 1624 */ 1625 vmx_set_int_window_exiting(vcpu); 1626 } 1627 1628 VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Injecting hwintr at vector %d", 1629 vector); 1630 1631 return; 1632 1633 cantinject: 1634 /* 1635 * Set the Interrupt Window Exiting execution control so we can inject 1636 * the interrupt as soon as blocking condition goes away. 1637 */ 1638 vmx_set_int_window_exiting(vcpu); 1639 } 1640 1641 /* 1642 * If the Virtual NMIs execution control is '1' then the logical processor 1643 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of 1644 * the VMCS. 
An IRET instruction in VMX non-root operation will remove any 1645 * virtual-NMI blocking. 1646 * 1647 * This unblocking occurs even if the IRET causes a fault. In this case the 1648 * hypervisor needs to restore virtual-NMI blocking before resuming the guest. 1649 */ 1650 static void 1651 vmx_restore_nmi_blocking(struct vmx_vcpu *vcpu) 1652 { 1653 uint32_t gi; 1654 1655 VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Restore Virtual-NMI blocking"); 1656 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1657 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1658 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1659 } 1660 1661 static void 1662 vmx_clear_nmi_blocking(struct vmx_vcpu *vcpu) 1663 { 1664 uint32_t gi; 1665 1666 VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, "Clear Virtual-NMI blocking"); 1667 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1668 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; 1669 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 1670 } 1671 1672 static void 1673 vmx_assert_nmi_blocking(struct vmx_vcpu *vcpu) 1674 { 1675 uint32_t gi __diagused; 1676 1677 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 1678 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING, 1679 ("NMI blocking is not in effect %#x", gi)); 1680 } 1681 1682 static int 1683 vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu, 1684 struct vm_exit *vmexit) 1685 { 1686 struct vmxctx *vmxctx; 1687 uint64_t xcrval; 1688 const struct xsave_limits *limits; 1689 1690 vmxctx = &vcpu->ctx; 1691 limits = vmm_get_xsave_limits(); 1692 1693 /* 1694 * Note that the processor raises a GP# fault on its own if 1695 * xsetbv is executed for CPL != 0, so we do not have to 1696 * emulate that fault here. 1697 */ 1698 1699 /* Only xcr0 is supported. */ 1700 if (vmxctx->guest_rcx != 0) { 1701 vm_inject_gp(vmx->vm, vcpu->vcpuid); 1702 return (HANDLED); 1703 } 1704 1705 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */ 1706 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) { 1707 vm_inject_ud(vmx->vm, vcpu->vcpuid); 1708 return (HANDLED); 1709 } 1710 1711 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff); 1712 if ((xcrval & ~limits->xcr0_allowed) != 0) { 1713 vm_inject_gp(vmx->vm, vcpu->vcpuid); 1714 return (HANDLED); 1715 } 1716 1717 if (!(xcrval & XFEATURE_ENABLED_X87)) { 1718 vm_inject_gp(vmx->vm, vcpu->vcpuid); 1719 return (HANDLED); 1720 } 1721 1722 /* AVX (YMM_Hi128) requires SSE. */ 1723 if (xcrval & XFEATURE_ENABLED_AVX && 1724 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) { 1725 vm_inject_gp(vmx->vm, vcpu->vcpuid); 1726 return (HANDLED); 1727 } 1728 1729 /* 1730 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask, 1731 * ZMM_Hi256, and Hi16_ZMM. 1732 */ 1733 if (xcrval & XFEATURE_AVX512 && 1734 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) != 1735 (XFEATURE_AVX512 | XFEATURE_AVX)) { 1736 vm_inject_gp(vmx->vm, vcpu->vcpuid); 1737 return (HANDLED); 1738 } 1739 1740 /* 1741 * Intel MPX requires both bound register state flags to be 1742 * set. 1743 */ 1744 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) != 1745 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) { 1746 vm_inject_gp(vmx->vm, vcpu->vcpuid); 1747 return (HANDLED); 1748 } 1749 1750 /* 1751 * This runs "inside" vmrun() with the guest's FPU state, so 1752 * modifying xcr0 directly modifies the guest's xcr0, not the 1753 * host's. 
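 *
 * The guest's %xcr0 value loaded here is saved, and the host's value
 * restored, when the guest FPU state is switched back out after the
 * VM-exit is handled.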
1754 */ 1755 load_xcr(0, xcrval); 1756 return (HANDLED); 1757 } 1758 1759 static uint64_t 1760 vmx_get_guest_reg(struct vmx_vcpu *vcpu, int ident) 1761 { 1762 const struct vmxctx *vmxctx; 1763 1764 vmxctx = &vcpu->ctx; 1765 1766 switch (ident) { 1767 case 0: 1768 return (vmxctx->guest_rax); 1769 case 1: 1770 return (vmxctx->guest_rcx); 1771 case 2: 1772 return (vmxctx->guest_rdx); 1773 case 3: 1774 return (vmxctx->guest_rbx); 1775 case 4: 1776 return (vmcs_read(VMCS_GUEST_RSP)); 1777 case 5: 1778 return (vmxctx->guest_rbp); 1779 case 6: 1780 return (vmxctx->guest_rsi); 1781 case 7: 1782 return (vmxctx->guest_rdi); 1783 case 8: 1784 return (vmxctx->guest_r8); 1785 case 9: 1786 return (vmxctx->guest_r9); 1787 case 10: 1788 return (vmxctx->guest_r10); 1789 case 11: 1790 return (vmxctx->guest_r11); 1791 case 12: 1792 return (vmxctx->guest_r12); 1793 case 13: 1794 return (vmxctx->guest_r13); 1795 case 14: 1796 return (vmxctx->guest_r14); 1797 case 15: 1798 return (vmxctx->guest_r15); 1799 default: 1800 panic("invalid vmx register %d", ident); 1801 } 1802 } 1803 1804 static void 1805 vmx_set_guest_reg(struct vmx_vcpu *vcpu, int ident, uint64_t regval) 1806 { 1807 struct vmxctx *vmxctx; 1808 1809 vmxctx = &vcpu->ctx; 1810 1811 switch (ident) { 1812 case 0: 1813 vmxctx->guest_rax = regval; 1814 break; 1815 case 1: 1816 vmxctx->guest_rcx = regval; 1817 break; 1818 case 2: 1819 vmxctx->guest_rdx = regval; 1820 break; 1821 case 3: 1822 vmxctx->guest_rbx = regval; 1823 break; 1824 case 4: 1825 vmcs_write(VMCS_GUEST_RSP, regval); 1826 break; 1827 case 5: 1828 vmxctx->guest_rbp = regval; 1829 break; 1830 case 6: 1831 vmxctx->guest_rsi = regval; 1832 break; 1833 case 7: 1834 vmxctx->guest_rdi = regval; 1835 break; 1836 case 8: 1837 vmxctx->guest_r8 = regval; 1838 break; 1839 case 9: 1840 vmxctx->guest_r9 = regval; 1841 break; 1842 case 10: 1843 vmxctx->guest_r10 = regval; 1844 break; 1845 case 11: 1846 vmxctx->guest_r11 = regval; 1847 break; 1848 case 12: 1849 vmxctx->guest_r12 = regval; 1850 break; 1851 case 13: 1852 vmxctx->guest_r13 = regval; 1853 break; 1854 case 14: 1855 vmxctx->guest_r14 = regval; 1856 break; 1857 case 15: 1858 vmxctx->guest_r15 = regval; 1859 break; 1860 default: 1861 panic("invalid vmx register %d", ident); 1862 } 1863 } 1864 1865 static int 1866 vmx_emulate_cr0_access(struct vmx_vcpu *vcpu, uint64_t exitqual) 1867 { 1868 uint64_t crval, regval; 1869 1870 /* We only handle mov to %cr0 at this time */ 1871 if ((exitqual & 0xf0) != 0x00) 1872 return (UNHANDLED); 1873 1874 regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); 1875 1876 vmcs_write(VMCS_CR0_SHADOW, regval); 1877 1878 crval = regval | cr0_ones_mask; 1879 crval &= ~cr0_zeros_mask; 1880 vmcs_write(VMCS_GUEST_CR0, crval); 1881 1882 if (regval & CR0_PG) { 1883 uint64_t efer, entry_ctls; 1884 1885 /* 1886 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and 1887 * the "IA-32e mode guest" bit in VM-entry control must be 1888 * equal. 
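 *
 * Outside of VMX operation the processor would set EFER.LMA by itself on
 * this transition, so mirror that below by setting both EFER.LMA in the
 * guest EFER and the VM_ENTRY_GUEST_LMA entry control.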
1889 */ 1890 efer = vmcs_read(VMCS_GUEST_IA32_EFER); 1891 if (efer & EFER_LME) { 1892 efer |= EFER_LMA; 1893 vmcs_write(VMCS_GUEST_IA32_EFER, efer); 1894 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS); 1895 entry_ctls |= VM_ENTRY_GUEST_LMA; 1896 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls); 1897 } 1898 } 1899 1900 return (HANDLED); 1901 } 1902 1903 static int 1904 vmx_emulate_cr4_access(struct vmx_vcpu *vcpu, uint64_t exitqual) 1905 { 1906 uint64_t crval, regval; 1907 1908 /* We only handle mov to %cr4 at this time */ 1909 if ((exitqual & 0xf0) != 0x00) 1910 return (UNHANDLED); 1911 1912 regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf); 1913 1914 vmcs_write(VMCS_CR4_SHADOW, regval); 1915 1916 crval = regval | cr4_ones_mask; 1917 crval &= ~cr4_zeros_mask; 1918 vmcs_write(VMCS_GUEST_CR4, crval); 1919 1920 return (HANDLED); 1921 } 1922 1923 static int 1924 vmx_emulate_cr8_access(struct vmx *vmx, struct vmx_vcpu *vcpu, 1925 uint64_t exitqual) 1926 { 1927 struct vlapic *vlapic; 1928 uint64_t cr8; 1929 int regnum; 1930 1931 /* We only handle mov %cr8 to/from a register at this time. */ 1932 if ((exitqual & 0xe0) != 0x00) { 1933 return (UNHANDLED); 1934 } 1935 1936 vlapic = vm_lapic(vmx->vm, vcpu->vcpuid); 1937 regnum = (exitqual >> 8) & 0xf; 1938 if (exitqual & 0x10) { 1939 cr8 = vlapic_get_cr8(vlapic); 1940 vmx_set_guest_reg(vcpu, regnum, cr8); 1941 } else { 1942 cr8 = vmx_get_guest_reg(vcpu, regnum); 1943 vlapic_set_cr8(vlapic, cr8); 1944 } 1945 1946 return (HANDLED); 1947 } 1948 1949 /* 1950 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL 1951 */ 1952 static int 1953 vmx_cpl(void) 1954 { 1955 uint32_t ssar; 1956 1957 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS); 1958 return ((ssar >> 5) & 0x3); 1959 } 1960 1961 static enum vm_cpu_mode 1962 vmx_cpu_mode(void) 1963 { 1964 uint32_t csar; 1965 1966 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) { 1967 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 1968 if (csar & 0x2000) 1969 return (CPU_MODE_64BIT); /* CS.L = 1 */ 1970 else 1971 return (CPU_MODE_COMPATIBILITY); 1972 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) { 1973 return (CPU_MODE_PROTECTED); 1974 } else { 1975 return (CPU_MODE_REAL); 1976 } 1977 } 1978 1979 static enum vm_paging_mode 1980 vmx_paging_mode(void) 1981 { 1982 uint64_t cr4; 1983 1984 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG)) 1985 return (PAGING_MODE_FLAT); 1986 cr4 = vmcs_read(VMCS_GUEST_CR4); 1987 if (!(cr4 & CR4_PAE)) 1988 return (PAGING_MODE_32); 1989 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME) { 1990 if (!(cr4 & CR4_LA57)) 1991 return (PAGING_MODE_64); 1992 return (PAGING_MODE_64_LA57); 1993 } else 1994 return (PAGING_MODE_PAE); 1995 } 1996 1997 static uint64_t 1998 inout_str_index(struct vmx_vcpu *vcpu, int in) 1999 { 2000 uint64_t val; 2001 int error __diagused; 2002 enum vm_reg_name reg; 2003 2004 reg = in ? 
VM_REG_GUEST_RDI : VM_REG_GUEST_RSI; 2005 error = vmx_getreg(vcpu, reg, &val); 2006 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error)); 2007 return (val); 2008 } 2009 2010 static uint64_t 2011 inout_str_count(struct vmx_vcpu *vcpu, int rep) 2012 { 2013 uint64_t val; 2014 int error __diagused; 2015 2016 if (rep) { 2017 error = vmx_getreg(vcpu, VM_REG_GUEST_RCX, &val); 2018 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error)); 2019 } else { 2020 val = 1; 2021 } 2022 return (val); 2023 } 2024 2025 static int 2026 inout_str_addrsize(uint32_t inst_info) 2027 { 2028 uint32_t size; 2029 2030 size = (inst_info >> 7) & 0x7; 2031 switch (size) { 2032 case 0: 2033 return (2); /* 16 bit */ 2034 case 1: 2035 return (4); /* 32 bit */ 2036 case 2: 2037 return (8); /* 64 bit */ 2038 default: 2039 panic("%s: invalid size encoding %d", __func__, size); 2040 } 2041 } 2042 2043 static void 2044 inout_str_seginfo(struct vmx_vcpu *vcpu, uint32_t inst_info, int in, 2045 struct vm_inout_str *vis) 2046 { 2047 int error __diagused, s; 2048 2049 if (in) { 2050 vis->seg_name = VM_REG_GUEST_ES; 2051 } else { 2052 s = (inst_info >> 15) & 0x7; 2053 vis->seg_name = vm_segment_name(s); 2054 } 2055 2056 error = vmx_getdesc(vcpu, vis->seg_name, &vis->seg_desc); 2057 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error)); 2058 } 2059 2060 static void 2061 vmx_paging_info(struct vm_guest_paging *paging) 2062 { 2063 paging->cr3 = vmcs_guest_cr3(); 2064 paging->cpl = vmx_cpl(); 2065 paging->cpu_mode = vmx_cpu_mode(); 2066 paging->paging_mode = vmx_paging_mode(); 2067 } 2068 2069 static void 2070 vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla) 2071 { 2072 struct vm_guest_paging *paging; 2073 uint32_t csar; 2074 2075 paging = &vmexit->u.inst_emul.paging; 2076 2077 vmexit->exitcode = VM_EXITCODE_INST_EMUL; 2078 vmexit->inst_length = 0; 2079 vmexit->u.inst_emul.gpa = gpa; 2080 vmexit->u.inst_emul.gla = gla; 2081 vmx_paging_info(paging); 2082 switch (paging->cpu_mode) { 2083 case CPU_MODE_REAL: 2084 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 2085 vmexit->u.inst_emul.cs_d = 0; 2086 break; 2087 case CPU_MODE_PROTECTED: 2088 case CPU_MODE_COMPATIBILITY: 2089 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE); 2090 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS); 2091 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar); 2092 break; 2093 default: 2094 vmexit->u.inst_emul.cs_base = 0; 2095 vmexit->u.inst_emul.cs_d = 0; 2096 break; 2097 } 2098 vie_init(&vmexit->u.inst_emul.vie, NULL, 0); 2099 } 2100 2101 static int 2102 ept_fault_type(uint64_t ept_qual) 2103 { 2104 int fault_type; 2105 2106 if (ept_qual & EPT_VIOLATION_DATA_WRITE) 2107 fault_type = VM_PROT_WRITE; 2108 else if (ept_qual & EPT_VIOLATION_INST_FETCH) 2109 fault_type = VM_PROT_EXECUTE; 2110 else 2111 fault_type= VM_PROT_READ; 2112 2113 return (fault_type); 2114 } 2115 2116 static bool 2117 ept_emulation_fault(uint64_t ept_qual) 2118 { 2119 int read, write; 2120 2121 /* EPT fault on an instruction fetch doesn't make sense here */ 2122 if (ept_qual & EPT_VIOLATION_INST_FETCH) 2123 return (false); 2124 2125 /* EPT fault must be a read fault or a write fault */ 2126 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0; 2127 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0; 2128 if ((read | write) == 0) 2129 return (false); 2130 2131 /* 2132 * The EPT violation must have been caused by accessing a 2133 * guest-physical address that is a translation of a guest-linear 2134 * address. 
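 * In exit-qualification terms this means both the "guest linear
 * address valid" and the "translation" bits must be set; a violation
 * taken while the processor was walking the guest's own page tables,
 * for instance, is not a candidate for MMIO instruction emulation.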
2135 */ 2136 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 || 2137 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) { 2138 return (false); 2139 } 2140 2141 return (true); 2142 } 2143 2144 static __inline int 2145 apic_access_virtualization(struct vmx_vcpu *vcpu) 2146 { 2147 uint32_t proc_ctls2; 2148 2149 proc_ctls2 = vcpu->cap.proc_ctls2; 2150 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0); 2151 } 2152 2153 static __inline int 2154 x2apic_virtualization(struct vmx_vcpu *vcpu) 2155 { 2156 uint32_t proc_ctls2; 2157 2158 proc_ctls2 = vcpu->cap.proc_ctls2; 2159 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0); 2160 } 2161 2162 static int 2163 vmx_handle_apic_write(struct vmx_vcpu *vcpu, struct vlapic *vlapic, 2164 uint64_t qual) 2165 { 2166 int error, handled, offset; 2167 uint32_t *apic_regs, vector; 2168 bool retu; 2169 2170 handled = HANDLED; 2171 offset = APIC_WRITE_OFFSET(qual); 2172 2173 if (!apic_access_virtualization(vcpu)) { 2174 /* 2175 * In general there should not be any APIC write VM-exits 2176 * unless APIC-access virtualization is enabled. 2177 * 2178 * However self-IPI virtualization can legitimately trigger 2179 * an APIC-write VM-exit so treat it specially. 2180 */ 2181 if (x2apic_virtualization(vcpu) && 2182 offset == APIC_OFFSET_SELF_IPI) { 2183 apic_regs = (uint32_t *)(vlapic->apic_page); 2184 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4]; 2185 vlapic_self_ipi_handler(vlapic, vector); 2186 return (HANDLED); 2187 } else 2188 return (UNHANDLED); 2189 } 2190 2191 switch (offset) { 2192 case APIC_OFFSET_ID: 2193 vlapic_id_write_handler(vlapic); 2194 break; 2195 case APIC_OFFSET_LDR: 2196 vlapic_ldr_write_handler(vlapic); 2197 break; 2198 case APIC_OFFSET_DFR: 2199 vlapic_dfr_write_handler(vlapic); 2200 break; 2201 case APIC_OFFSET_SVR: 2202 vlapic_svr_write_handler(vlapic); 2203 break; 2204 case APIC_OFFSET_ESR: 2205 vlapic_esr_write_handler(vlapic); 2206 break; 2207 case APIC_OFFSET_ICR_LOW: 2208 retu = false; 2209 error = vlapic_icrlo_write_handler(vlapic, &retu); 2210 if (error != 0 || retu) 2211 handled = UNHANDLED; 2212 break; 2213 case APIC_OFFSET_CMCI_LVT: 2214 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 2215 vlapic_lvt_write_handler(vlapic, offset); 2216 break; 2217 case APIC_OFFSET_TIMER_ICR: 2218 vlapic_icrtmr_write_handler(vlapic); 2219 break; 2220 case APIC_OFFSET_TIMER_DCR: 2221 vlapic_dcr_write_handler(vlapic); 2222 break; 2223 default: 2224 handled = UNHANDLED; 2225 break; 2226 } 2227 return (handled); 2228 } 2229 2230 static bool 2231 apic_access_fault(struct vmx_vcpu *vcpu, uint64_t gpa) 2232 { 2233 2234 if (apic_access_virtualization(vcpu) && 2235 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2236 return (true); 2237 else 2238 return (false); 2239 } 2240 2241 static int 2242 vmx_handle_apic_access(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) 2243 { 2244 uint64_t qual; 2245 int access_type, offset, allowed; 2246 2247 if (!apic_access_virtualization(vcpu)) 2248 return (UNHANDLED); 2249 2250 qual = vmexit->u.vmx.exit_qualification; 2251 access_type = APIC_ACCESS_TYPE(qual); 2252 offset = APIC_ACCESS_OFFSET(qual); 2253 2254 allowed = 0; 2255 if (access_type == 0) { 2256 /* 2257 * Read data access to the following registers is expected. 
2258 */ 2259 switch (offset) { 2260 case APIC_OFFSET_APR: 2261 case APIC_OFFSET_PPR: 2262 case APIC_OFFSET_RRR: 2263 case APIC_OFFSET_CMCI_LVT: 2264 case APIC_OFFSET_TIMER_CCR: 2265 allowed = 1; 2266 break; 2267 default: 2268 break; 2269 } 2270 } else if (access_type == 1) { 2271 /* 2272 * Write data access to the following registers is expected. 2273 */ 2274 switch (offset) { 2275 case APIC_OFFSET_VER: 2276 case APIC_OFFSET_APR: 2277 case APIC_OFFSET_PPR: 2278 case APIC_OFFSET_RRR: 2279 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2280 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2281 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2282 case APIC_OFFSET_CMCI_LVT: 2283 case APIC_OFFSET_TIMER_CCR: 2284 allowed = 1; 2285 break; 2286 default: 2287 break; 2288 } 2289 } 2290 2291 if (allowed) { 2292 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset, 2293 VIE_INVALID_GLA); 2294 } 2295 2296 /* 2297 * Regardless of whether the APIC-access is allowed this handler 2298 * always returns UNHANDLED: 2299 * - if the access is allowed then it is handled by emulating the 2300 * instruction that caused the VM-exit (outside the critical section) 2301 * - if the access is not allowed then it will be converted to an 2302 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 2303 */ 2304 return (UNHANDLED); 2305 } 2306 2307 static enum task_switch_reason 2308 vmx_task_switch_reason(uint64_t qual) 2309 { 2310 int reason; 2311 2312 reason = (qual >> 30) & 0x3; 2313 switch (reason) { 2314 case 0: 2315 return (TSR_CALL); 2316 case 1: 2317 return (TSR_IRET); 2318 case 2: 2319 return (TSR_JMP); 2320 case 3: 2321 return (TSR_IDT_GATE); 2322 default: 2323 panic("%s: invalid reason %d", __func__, reason); 2324 } 2325 } 2326 2327 static int 2328 emulate_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val, 2329 bool *retu) 2330 { 2331 int error; 2332 2333 if (lapic_msr(num)) 2334 error = lapic_wrmsr(vmx->vm, vcpu->vcpuid, num, val, retu); 2335 else 2336 error = vmx_wrmsr(vmx, vcpu, num, val, retu); 2337 2338 return (error); 2339 } 2340 2341 static int 2342 emulate_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, bool *retu) 2343 { 2344 struct vmxctx *vmxctx; 2345 uint64_t result; 2346 uint32_t eax, edx; 2347 int error; 2348 2349 if (lapic_msr(num)) 2350 error = lapic_rdmsr(vmx->vm, vcpu->vcpuid, num, &result, retu); 2351 else 2352 error = vmx_rdmsr(vmx, vcpu, num, &result, retu); 2353 2354 if (error == 0) { 2355 eax = result; 2356 vmxctx = &vcpu->ctx; 2357 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax); 2358 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error)); 2359 2360 edx = result >> 32; 2361 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx); 2362 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error)); 2363 } 2364 2365 return (error); 2366 } 2367 2368 static int 2369 vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit) 2370 { 2371 int error, errcode, errcode_valid, handled, in; 2372 struct vmxctx *vmxctx; 2373 struct vlapic *vlapic; 2374 struct vm_inout_str *vis; 2375 struct vm_task_switch *ts; 2376 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info; 2377 uint32_t intr_type, intr_vec, reason; 2378 uint64_t exitintinfo, qual, gpa; 2379 int vcpuid; 2380 bool retu; 2381 2382 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2383 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2384 2385 handled = UNHANDLED; 2386 vmxctx = &vcpu->ctx; 2387 vcpuid = vcpu->vcpuid; 2388 2389 qual = 
vmexit->u.vmx.exit_qualification; 2390 reason = vmexit->u.vmx.exit_reason; 2391 vmexit->exitcode = VM_EXITCODE_BOGUS; 2392 2393 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_COUNT, 1); 2394 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpuid, vmexit); 2395 2396 /* 2397 * VM-entry failures during or after loading guest state. 2398 * 2399 * These VM-exits are uncommon but must be handled specially 2400 * as most VM-exit fields are not populated as usual. 2401 */ 2402 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) { 2403 VCPU_CTR0(vmx->vm, vcpuid, "Handling MCE during VM-entry"); 2404 __asm __volatile("int $18"); 2405 return (1); 2406 } 2407 2408 /* 2409 * VM exits that can be triggered during event delivery need to 2410 * be handled specially by re-injecting the event if the IDT 2411 * vectoring information field's valid bit is set. 2412 * 2413 * See "Information for VM Exits During Event Delivery" in Intel SDM 2414 * for details. 2415 */ 2416 idtvec_info = vmcs_idt_vectoring_info(); 2417 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2418 idtvec_info &= ~(1 << 12); /* clear undefined bit */ 2419 exitintinfo = idtvec_info; 2420 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2421 idtvec_err = vmcs_idt_vectoring_err(); 2422 exitintinfo |= (uint64_t)idtvec_err << 32; 2423 } 2424 error = vm_exit_intinfo(vmx->vm, vcpuid, exitintinfo); 2425 KASSERT(error == 0, ("%s: vm_set_intinfo error %d", 2426 __func__, error)); 2427 2428 /* 2429 * If 'virtual NMIs' are being used and the VM-exit 2430 * happened while injecting an NMI during the previous 2431 * VM-entry, then clear "blocking by NMI" in the 2432 * Guest Interruptibility-State so the NMI can be 2433 * reinjected on the subsequent VM-entry. 2434 * 2435 * However, if the NMI was being delivered through a task 2436 * gate, then the new task must start execution with NMIs 2437 * blocked so don't clear NMI blocking in this case. 2438 */ 2439 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2440 if (intr_type == VMCS_INTR_T_NMI) { 2441 if (reason != EXIT_REASON_TASK_SWITCH) 2442 vmx_clear_nmi_blocking(vcpu); 2443 else 2444 vmx_assert_nmi_blocking(vcpu); 2445 } 2446 2447 /* 2448 * Update VM-entry instruction length if the event being 2449 * delivered was a software interrupt or software exception. 2450 */ 2451 if (intr_type == VMCS_INTR_T_SWINTR || 2452 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2453 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2454 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2455 } 2456 } 2457 2458 switch (reason) { 2459 case EXIT_REASON_TASK_SWITCH: 2460 ts = &vmexit->u.task_switch; 2461 ts->tsssel = qual & 0xffff; 2462 ts->reason = vmx_task_switch_reason(qual); 2463 ts->ext = 0; 2464 ts->errcode_valid = 0; 2465 vmx_paging_info(&ts->paging); 2466 /* 2467 * If the task switch was due to a CALL, JMP, IRET, software 2468 * interrupt (INT n) or software exception (INT3, INTO), 2469 * then the saved %rip references the instruction that caused 2470 * the task switch. The instruction length field in the VMCS 2471 * is valid in this case. 2472 * 2473 * In all other cases (e.g., NMI, hardware exception) the 2474 * saved %rip is one that would have been saved in the old TSS 2475 * had the task switch completed normally so the instruction 2476 * length field is not needed in this case and is explicitly 2477 * set to 0. 
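 *
 * The TSR_IDT_GATE case below tells these apart using the
 * IDT-vectoring information: software interrupts and software
 * exceptions keep the instruction length, while externally
 * triggered events (hardware interrupts, NMIs) set ts->ext and
 * force the instruction length to 0.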
2478 */ 2479 if (ts->reason == TSR_IDT_GATE) { 2480 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2481 ("invalid idtvec_info %#x for IDT task switch", 2482 idtvec_info)); 2483 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2484 if (intr_type != VMCS_INTR_T_SWINTR && 2485 intr_type != VMCS_INTR_T_SWEXCEPTION && 2486 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2487 /* Task switch triggered by external event */ 2488 ts->ext = 1; 2489 vmexit->inst_length = 0; 2490 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2491 ts->errcode_valid = 1; 2492 ts->errcode = vmcs_idt_vectoring_err(); 2493 } 2494 } 2495 } 2496 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2497 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpuid, vmexit, ts); 2498 VCPU_CTR4(vmx->vm, vcpuid, "task switch reason %d, tss 0x%04x, " 2499 "%s errcode 0x%016lx", ts->reason, ts->tsssel, 2500 ts->ext ? "external" : "internal", 2501 ((uint64_t)ts->errcode << 32) | ts->errcode_valid); 2502 break; 2503 case EXIT_REASON_CR_ACCESS: 2504 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_CR_ACCESS, 1); 2505 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpuid, vmexit, qual); 2506 switch (qual & 0xf) { 2507 case 0: 2508 handled = vmx_emulate_cr0_access(vcpu, qual); 2509 break; 2510 case 4: 2511 handled = vmx_emulate_cr4_access(vcpu, qual); 2512 break; 2513 case 8: 2514 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2515 break; 2516 } 2517 break; 2518 case EXIT_REASON_RDMSR: 2519 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1); 2520 retu = false; 2521 ecx = vmxctx->guest_rcx; 2522 VCPU_CTR1(vmx->vm, vcpuid, "rdmsr 0x%08x", ecx); 2523 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpuid, vmexit, ecx); 2524 error = emulate_rdmsr(vmx, vcpu, ecx, &retu); 2525 if (error) { 2526 vmexit->exitcode = VM_EXITCODE_RDMSR; 2527 vmexit->u.msr.code = ecx; 2528 } else if (!retu) { 2529 handled = HANDLED; 2530 } else { 2531 /* Return to userspace with a valid exitcode */ 2532 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2533 ("emulate_rdmsr retu with bogus exitcode")); 2534 } 2535 break; 2536 case EXIT_REASON_WRMSR: 2537 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1); 2538 retu = false; 2539 eax = vmxctx->guest_rax; 2540 ecx = vmxctx->guest_rcx; 2541 edx = vmxctx->guest_rdx; 2542 VCPU_CTR2(vmx->vm, vcpuid, "wrmsr 0x%08x value 0x%016lx", 2543 ecx, (uint64_t)edx << 32 | eax); 2544 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpuid, ecx, 2545 (uint64_t)edx << 32 | eax); 2546 error = emulate_wrmsr(vmx, vcpu, ecx, 2547 (uint64_t)edx << 32 | eax, &retu); 2548 if (error) { 2549 vmexit->exitcode = VM_EXITCODE_WRMSR; 2550 vmexit->u.msr.code = ecx; 2551 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax; 2552 } else if (!retu) { 2553 handled = HANDLED; 2554 } else { 2555 /* Return to userspace with a valid exitcode */ 2556 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, 2557 ("emulate_wrmsr retu with bogus exitcode")); 2558 } 2559 break; 2560 case EXIT_REASON_HLT: 2561 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_HLT, 1); 2562 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpuid, vmexit); 2563 vmexit->exitcode = VM_EXITCODE_HLT; 2564 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2565 if (virtual_interrupt_delivery) 2566 vmexit->u.hlt.intr_status = 2567 vmcs_read(VMCS_GUEST_INTR_STATUS); 2568 else 2569 vmexit->u.hlt.intr_status = 0; 2570 break; 2571 case EXIT_REASON_MTF: 2572 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_MTRAP, 1); 2573 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpuid, vmexit); 2574 vmexit->exitcode = VM_EXITCODE_MTRAP; 2575 vmexit->inst_length = 0; 2576 break; 2577 case EXIT_REASON_PAUSE: 2578 
vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_PAUSE, 1); 2579 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpuid, vmexit); 2580 vmexit->exitcode = VM_EXITCODE_PAUSE; 2581 break; 2582 case EXIT_REASON_INTR_WINDOW: 2583 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INTR_WINDOW, 1); 2584 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpuid, vmexit); 2585 vmx_clear_int_window_exiting(vcpu); 2586 return (1); 2587 case EXIT_REASON_EXT_INTR: 2588 /* 2589 * External interrupts serve only to cause VM exits and allow 2590 * the host interrupt handler to run. 2591 * 2592 * If this external interrupt triggers a virtual interrupt 2593 * to a VM, then that state will be recorded by the 2594 * host interrupt handler in the VM's softc. We will inject 2595 * this virtual interrupt during the subsequent VM enter. 2596 */ 2597 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2598 SDT_PROBE4(vmm, vmx, exit, interrupt, 2599 vmx, vcpuid, vmexit, intr_info); 2600 2601 /* 2602 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2603 * This appears to be a bug in VMware Fusion? 2604 */ 2605 if (!(intr_info & VMCS_INTR_VALID)) 2606 return (1); 2607 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2608 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2609 ("VM exit interruption info invalid: %#x", intr_info)); 2610 vmx_trigger_hostintr(intr_info & 0xff); 2611 2612 /* 2613 * This is special. We want to treat this as an 'handled' 2614 * VM-exit but not increment the instruction pointer. 2615 */ 2616 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_EXTINT, 1); 2617 return (1); 2618 case EXIT_REASON_NMI_WINDOW: 2619 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpuid, vmexit); 2620 /* Exit to allow the pending virtual NMI to be injected */ 2621 if (vm_nmi_pending(vmx->vm, vcpuid)) 2622 vmx_inject_nmi(vmx, vcpu); 2623 vmx_clear_nmi_window_exiting(vcpu); 2624 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NMI_WINDOW, 1); 2625 return (1); 2626 case EXIT_REASON_INOUT: 2627 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INOUT, 1); 2628 vmexit->exitcode = VM_EXITCODE_INOUT; 2629 vmexit->u.inout.bytes = (qual & 0x7) + 1; 2630 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0; 2631 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0; 2632 vmexit->u.inout.rep = (qual & 0x20) ? 
1 : 0; 2633 vmexit->u.inout.port = (uint16_t)(qual >> 16); 2634 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax); 2635 if (vmexit->u.inout.string) { 2636 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO); 2637 vmexit->exitcode = VM_EXITCODE_INOUT_STR; 2638 vis = &vmexit->u.inout_str; 2639 vmx_paging_info(&vis->paging); 2640 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2641 vis->cr0 = vmcs_read(VMCS_GUEST_CR0); 2642 vis->index = inout_str_index(vcpu, in); 2643 vis->count = inout_str_count(vcpu, vis->inout.rep); 2644 vis->addrsize = inout_str_addrsize(inst_info); 2645 inout_str_seginfo(vcpu, inst_info, in, vis); 2646 } 2647 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpuid, vmexit); 2648 break; 2649 case EXIT_REASON_CPUID: 2650 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_CPUID, 1); 2651 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpuid, vmexit); 2652 handled = vmx_handle_cpuid(vmx->vm, vcpuid, vmxctx); 2653 break; 2654 case EXIT_REASON_EXCEPTION: 2655 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_EXCEPTION, 1); 2656 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2657 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2658 ("VM exit interruption info invalid: %#x", intr_info)); 2659 2660 intr_vec = intr_info & 0xff; 2661 intr_type = intr_info & VMCS_INTR_T_MASK; 2662 2663 /* 2664 * If Virtual NMIs control is 1 and the VM-exit is due to a 2665 * fault encountered during the execution of IRET then we must 2666 * restore the state of "virtual-NMI blocking" before resuming 2667 * the guest. 2668 * 2669 * See "Resuming Guest Software after Handling an Exception". 2670 * See "Information for VM Exits Due to Vectored Events". 2671 */ 2672 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2673 (intr_vec != IDT_DF) && 2674 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2675 vmx_restore_nmi_blocking(vcpu); 2676 2677 /* 2678 * The NMI has already been handled in vmx_exit_handle_nmi(). 2679 */ 2680 if (intr_type == VMCS_INTR_T_NMI) 2681 return (1); 2682 2683 /* 2684 * Call the machine check handler by hand. Also don't reflect 2685 * the machine check back into the guest. 2686 */ 2687 if (intr_vec == IDT_MC) { 2688 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to MCE handler"); 2689 __asm __volatile("int $18"); 2690 return (1); 2691 } 2692 2693 /* 2694 * If the hypervisor has requested user exits for 2695 * debug exceptions, bounce them out to userland. 2696 */ 2697 if (intr_type == VMCS_INTR_T_SWEXCEPTION && intr_vec == IDT_BP && 2698 (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) { 2699 vmexit->exitcode = VM_EXITCODE_BPT; 2700 vmexit->u.bpt.inst_length = vmexit->inst_length; 2701 vmexit->inst_length = 0; 2702 break; 2703 } 2704 2705 if (intr_vec == IDT_PF) { 2706 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual); 2707 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d", 2708 __func__, error)); 2709 } 2710 2711 /* 2712 * Software exceptions exhibit trap-like behavior. This in 2713 * turn requires populating the VM-entry instruction length 2714 * so that the %rip in the trap frame is past the INT3/INTO 2715 * instruction. 
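 * Without this, the %rip pushed for the injected exception would
 * point at the INT3/INTO itself and the guest would re-execute it
 * as soon as its handler returned.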
2716 */ 2717 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2718 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2719 2720 /* Reflect all other exceptions back into the guest */ 2721 errcode_valid = errcode = 0; 2722 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2723 errcode_valid = 1; 2724 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2725 } 2726 VCPU_CTR2(vmx->vm, vcpuid, "Reflecting exception %d/%#x into " 2727 "the guest", intr_vec, errcode); 2728 SDT_PROBE5(vmm, vmx, exit, exception, 2729 vmx, vcpuid, vmexit, intr_vec, errcode); 2730 error = vm_inject_exception(vmx->vm, vcpuid, intr_vec, 2731 errcode_valid, errcode, 0); 2732 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2733 __func__, error)); 2734 return (1); 2735 2736 case EXIT_REASON_EPT_FAULT: 2737 /* 2738 * If 'gpa' lies within the address space allocated to 2739 * memory then this must be a nested page fault otherwise 2740 * this must be an instruction that accesses MMIO space. 2741 */ 2742 gpa = vmcs_gpa(); 2743 if (vm_mem_allocated(vmx->vm, vcpuid, gpa) || 2744 apic_access_fault(vcpu, gpa)) { 2745 vmexit->exitcode = VM_EXITCODE_PAGING; 2746 vmexit->inst_length = 0; 2747 vmexit->u.paging.gpa = gpa; 2748 vmexit->u.paging.fault_type = ept_fault_type(qual); 2749 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NESTED_FAULT, 1); 2750 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2751 vmx, vcpuid, vmexit, gpa, qual); 2752 } else if (ept_emulation_fault(qual)) { 2753 vmexit_inst_emul(vmexit, gpa, vmcs_gla()); 2754 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INST_EMUL, 1); 2755 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2756 vmx, vcpuid, vmexit, gpa); 2757 } 2758 /* 2759 * If Virtual NMIs control is 1 and the VM-exit is due to an 2760 * EPT fault during the execution of IRET then we must restore 2761 * the state of "virtual-NMI blocking" before resuming. 2762 * 2763 * See description of "NMI unblocking due to IRET" in 2764 * "Exit Qualification for EPT Violations". 2765 */ 2766 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2767 (qual & EXIT_QUAL_NMIUDTI) != 0) 2768 vmx_restore_nmi_blocking(vcpu); 2769 break; 2770 case EXIT_REASON_VIRTUALIZED_EOI: 2771 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2772 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2773 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpuid, vmexit); 2774 vmexit->inst_length = 0; /* trap-like */ 2775 break; 2776 case EXIT_REASON_APIC_ACCESS: 2777 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpuid, vmexit); 2778 handled = vmx_handle_apic_access(vcpu, vmexit); 2779 break; 2780 case EXIT_REASON_APIC_WRITE: 2781 /* 2782 * APIC-write VM exit is trap-like so the %rip is already 2783 * pointing to the next instruction. 
2784 */ 2785 vmexit->inst_length = 0; 2786 vlapic = vm_lapic(vmx->vm, vcpuid); 2787 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2788 vmx, vcpuid, vmexit, vlapic); 2789 handled = vmx_handle_apic_write(vcpu, vlapic, qual); 2790 break; 2791 case EXIT_REASON_XSETBV: 2792 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpuid, vmexit); 2793 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2794 break; 2795 case EXIT_REASON_MONITOR: 2796 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpuid, vmexit); 2797 vmexit->exitcode = VM_EXITCODE_MONITOR; 2798 break; 2799 case EXIT_REASON_MWAIT: 2800 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpuid, vmexit); 2801 vmexit->exitcode = VM_EXITCODE_MWAIT; 2802 break; 2803 case EXIT_REASON_TPR: 2804 vlapic = vm_lapic(vmx->vm, vcpuid); 2805 vlapic_sync_tpr(vlapic); 2806 vmexit->inst_length = 0; 2807 handled = HANDLED; 2808 break; 2809 case EXIT_REASON_VMCALL: 2810 case EXIT_REASON_VMCLEAR: 2811 case EXIT_REASON_VMLAUNCH: 2812 case EXIT_REASON_VMPTRLD: 2813 case EXIT_REASON_VMPTRST: 2814 case EXIT_REASON_VMREAD: 2815 case EXIT_REASON_VMRESUME: 2816 case EXIT_REASON_VMWRITE: 2817 case EXIT_REASON_VMXOFF: 2818 case EXIT_REASON_VMXON: 2819 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpuid, vmexit); 2820 vmexit->exitcode = VM_EXITCODE_VMINSN; 2821 break; 2822 case EXIT_REASON_INVD: 2823 case EXIT_REASON_WBINVD: 2824 /* ignore exit */ 2825 handled = HANDLED; 2826 break; 2827 default: 2828 SDT_PROBE4(vmm, vmx, exit, unknown, 2829 vmx, vcpuid, vmexit, reason); 2830 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_UNKNOWN, 1); 2831 break; 2832 } 2833 2834 if (handled) { 2835 /* 2836 * It is possible that control is returned to userland 2837 * even though we were able to handle the VM exit in the 2838 * kernel. 2839 * 2840 * In such a case we want to make sure that the userland 2841 * restarts guest execution at the instruction *after* 2842 * the one we just processed. Therefore we update the 2843 * guest rip in the VMCS and in 'vmexit'. 2844 */ 2845 vmexit->rip += vmexit->inst_length; 2846 vmexit->inst_length = 0; 2847 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2848 } else { 2849 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2850 /* 2851 * If this VM exit was not claimed by anybody then 2852 * treat it as a generic VMX exit. 2853 */ 2854 vmexit->exitcode = VM_EXITCODE_VMX; 2855 vmexit->u.vmx.status = VM_SUCCESS; 2856 vmexit->u.vmx.inst_type = 0; 2857 vmexit->u.vmx.inst_error = 0; 2858 } else { 2859 /* 2860 * The exitcode and collateral have been populated. 2861 * The VM exit will be processed further in userland. 
2862 */ 2863 } 2864 } 2865 2866 SDT_PROBE4(vmm, vmx, exit, return, 2867 vmx, vcpuid, vmexit, handled); 2868 return (handled); 2869 } 2870 2871 static __inline void 2872 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2873 { 2874 2875 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2876 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2877 vmxctx->inst_fail_status)); 2878 2879 vmexit->inst_length = 0; 2880 vmexit->exitcode = VM_EXITCODE_VMX; 2881 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2882 vmexit->u.vmx.inst_error = vmcs_instruction_error(); 2883 vmexit->u.vmx.exit_reason = ~0; 2884 vmexit->u.vmx.exit_qualification = ~0; 2885 2886 switch (rc) { 2887 case VMX_VMRESUME_ERROR: 2888 case VMX_VMLAUNCH_ERROR: 2889 vmexit->u.vmx.inst_type = rc; 2890 break; 2891 default: 2892 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2893 } 2894 } 2895 2896 /* 2897 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2898 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2899 * sufficient to simply vector to the NMI handler via a software interrupt. 2900 * However, this must be done before maskable interrupts are enabled 2901 * otherwise the "iret" issued by an interrupt handler will incorrectly 2902 * clear NMI blocking. 2903 */ 2904 static __inline void 2905 vmx_exit_handle_nmi(struct vmx_vcpu *vcpu, struct vm_exit *vmexit) 2906 { 2907 uint32_t intr_info; 2908 2909 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled")); 2910 2911 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION) 2912 return; 2913 2914 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2915 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2916 ("VM exit interruption info invalid: %#x", intr_info)); 2917 2918 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2919 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due " 2920 "to NMI has invalid vector: %#x", intr_info)); 2921 VCPU_CTR0(vcpu->vmx->vm, vcpu->vcpuid, 2922 "Vectoring to NMI handler"); 2923 __asm __volatile("int $2"); 2924 } 2925 } 2926 2927 static __inline void 2928 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2929 { 2930 register_t rflags; 2931 2932 /* Save host control debug registers. */ 2933 vmxctx->host_dr7 = rdr7(); 2934 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2935 2936 /* 2937 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2938 * exceptions in the host based on the guest DRx values. The 2939 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2940 */ 2941 load_dr7(0); 2942 wrmsr(MSR_DEBUGCTLMSR, 0); 2943 2944 /* 2945 * Disable single stepping the kernel to avoid corrupting the 2946 * guest DR6. A debugger might still be able to corrupt the 2947 * guest DR6 by setting a breakpoint after this point and then 2948 * single stepping. 2949 */ 2950 rflags = read_rflags(); 2951 vmxctx->host_tf = rflags & PSL_T; 2952 write_rflags(rflags & ~PSL_T); 2953 2954 /* Save host debug registers. */ 2955 vmxctx->host_dr0 = rdr0(); 2956 vmxctx->host_dr1 = rdr1(); 2957 vmxctx->host_dr2 = rdr2(); 2958 vmxctx->host_dr3 = rdr3(); 2959 vmxctx->host_dr6 = rdr6(); 2960 2961 /* Restore guest debug registers. */ 2962 load_dr0(vmxctx->guest_dr0); 2963 load_dr1(vmxctx->guest_dr1); 2964 load_dr2(vmxctx->guest_dr2); 2965 load_dr3(vmxctx->guest_dr3); 2966 load_dr6(vmxctx->guest_dr6); 2967 } 2968 2969 static __inline void 2970 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2971 { 2972 2973 /* Save guest debug registers. 
*/ 2974 vmxctx->guest_dr0 = rdr0(); 2975 vmxctx->guest_dr1 = rdr1(); 2976 vmxctx->guest_dr2 = rdr2(); 2977 vmxctx->guest_dr3 = rdr3(); 2978 vmxctx->guest_dr6 = rdr6(); 2979 2980 /* 2981 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2982 * PSL_T last. 2983 */ 2984 load_dr0(vmxctx->host_dr0); 2985 load_dr1(vmxctx->host_dr1); 2986 load_dr2(vmxctx->host_dr2); 2987 load_dr3(vmxctx->host_dr3); 2988 load_dr6(vmxctx->host_dr6); 2989 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2990 load_dr7(vmxctx->host_dr7); 2991 write_rflags(read_rflags() | vmxctx->host_tf); 2992 } 2993 2994 static __inline void 2995 vmx_pmap_activate(struct vmx *vmx, pmap_t pmap) 2996 { 2997 long eptgen; 2998 int cpu; 2999 3000 cpu = curcpu; 3001 3002 CPU_SET_ATOMIC(cpu, &pmap->pm_active); 3003 smr_enter(pmap->pm_eptsmr); 3004 eptgen = atomic_load_long(&pmap->pm_eptgen); 3005 if (eptgen != vmx->eptgen[cpu]) { 3006 vmx->eptgen[cpu] = eptgen; 3007 invept(INVEPT_TYPE_SINGLE_CONTEXT, 3008 (struct invept_desc){ .eptp = vmx->eptp, ._res = 0 }); 3009 } 3010 } 3011 3012 static __inline void 3013 vmx_pmap_deactivate(struct vmx *vmx, pmap_t pmap) 3014 { 3015 smr_exit(pmap->pm_eptsmr); 3016 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); 3017 } 3018 3019 static int 3020 vmx_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) 3021 { 3022 int rc, handled, launched, vcpuid; 3023 struct vmx *vmx; 3024 struct vmx_vcpu *vcpu; 3025 struct vm *vm; 3026 struct vmxctx *vmxctx; 3027 struct vmcs *vmcs; 3028 struct vm_exit *vmexit; 3029 struct vlapic *vlapic; 3030 uint32_t exit_reason; 3031 struct region_descriptor gdtr, idtr; 3032 uint16_t ldt_sel; 3033 3034 vcpu = vcpui; 3035 vmx = vcpu->vmx; 3036 vm = vmx->vm; 3037 vcpuid = vcpu->vcpuid; 3038 vmcs = vcpu->vmcs; 3039 vmxctx = &vcpu->ctx; 3040 vlapic = vm_lapic(vm, vcpuid); 3041 vmexit = vm_exitinfo(vm, vcpuid); 3042 launched = 0; 3043 3044 KASSERT(vmxctx->pmap == pmap, 3045 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap)); 3046 3047 vmx_msr_guest_enter(vmx, vcpu); 3048 3049 VMPTRLD(vmcs); 3050 3051 /* 3052 * XXX 3053 * We do this every time because we may setup the virtual machine 3054 * from a different process than the one that actually runs it. 3055 * 3056 * If the life of a virtual machine was spent entirely in the context 3057 * of a single process we could do this once in vmx_init(). 3058 */ 3059 vmcs_write(VMCS_HOST_CR3, rcr3()); 3060 3061 vmcs_write(VMCS_GUEST_RIP, rip); 3062 vmx_set_pcpu_defaults(vmx, vcpu, pmap); 3063 do { 3064 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch " 3065 "%#lx/%#lx", __func__, vmcs_guest_rip(), rip)); 3066 3067 handled = UNHANDLED; 3068 /* 3069 * Interrupts are disabled from this point on until the 3070 * guest starts executing. This is done for the following 3071 * reasons: 3072 * 3073 * If an AST is asserted on this thread after the check below, 3074 * then the IPI_AST notification will not be lost, because it 3075 * will cause a VM exit due to external interrupt as soon as 3076 * the guest state is loaded. 3077 * 3078 * A posted interrupt after 'vmx_inject_interrupts()' will 3079 * not be "lost" because it will be held pending in the host 3080 * APIC because interrupts are disabled. The pending interrupt 3081 * will be recognized as soon as the guest state is loaded. 3082 * 3083 * The same reasoning applies to the IPI generated by 3084 * pmap_invalidate_ept(). 
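 * Interrupts remain disabled across vmx_enter_guest() and are only
 * re-enabled further down in this loop, once vmx_exit_handle_nmi()
 * has had a chance to vector NMI exits to the host NMI handler.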
3085 */ 3086 disable_intr(); 3087 vmx_inject_interrupts(vmx, vcpu, vlapic, rip); 3088 3089 /* 3090 * Check for vcpu suspension after injecting events because 3091 * vmx_inject_interrupts() can suspend the vcpu due to a 3092 * triple fault. 3093 */ 3094 if (vcpu_suspended(evinfo)) { 3095 enable_intr(); 3096 vm_exit_suspended(vmx->vm, vcpuid, rip); 3097 break; 3098 } 3099 3100 if (vcpu_rendezvous_pending(evinfo)) { 3101 enable_intr(); 3102 vm_exit_rendezvous(vmx->vm, vcpuid, rip); 3103 break; 3104 } 3105 3106 if (vcpu_reqidle(evinfo)) { 3107 enable_intr(); 3108 vm_exit_reqidle(vmx->vm, vcpuid, rip); 3109 break; 3110 } 3111 3112 if (vcpu_should_yield(vm, vcpuid)) { 3113 enable_intr(); 3114 vm_exit_astpending(vmx->vm, vcpuid, rip); 3115 vmx_astpending_trace(vcpu, rip); 3116 handled = HANDLED; 3117 break; 3118 } 3119 3120 if (vcpu_debugged(vm, vcpuid)) { 3121 enable_intr(); 3122 vm_exit_debug(vmx->vm, vcpuid, rip); 3123 break; 3124 } 3125 3126 /* 3127 * If TPR Shadowing is enabled, the TPR Threshold 3128 * must be updated right before entering the guest. 3129 */ 3130 if (tpr_shadowing && !virtual_interrupt_delivery) { 3131 if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) { 3132 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3133 } 3134 } 3135 3136 /* 3137 * VM exits restore the base address but not the 3138 * limits of GDTR and IDTR. The VMCS only stores the 3139 * base address, so VM exits set the limits to 0xffff. 3140 * Save and restore the full GDTR and IDTR to restore 3141 * the limits. 3142 * 3143 * The VMCS does not save the LDTR at all, and VM 3144 * exits clear LDTR as if a NULL selector were loaded. 3145 * The userspace hypervisor probably doesn't use a 3146 * LDT, but save and restore it to be safe. 3147 */ 3148 sgdt(&gdtr); 3149 sidt(&idtr); 3150 ldt_sel = sldt(); 3151 3152 /* 3153 * The TSC_AUX MSR must be saved/restored while interrupts 3154 * are disabled so that it is not possible for the guest 3155 * TSC_AUX MSR value to be overwritten by the resume 3156 * portion of the IPI_SUSPEND codepath. This is why the 3157 * transition of this MSR is handled separately from those 3158 * handled by vmx_msr_guest_{enter,exit}(), which are ok to 3159 * be transitioned with preemption disabled but interrupts 3160 * enabled. 3161 * 3162 * These vmx_msr_guest_{enter,exit}_tsc_aux() calls can be 3163 * anywhere in this loop so long as they happen with 3164 * interrupts disabled. This location is chosen for 3165 * simplicity. 3166 */ 3167 vmx_msr_guest_enter_tsc_aux(vmx, vcpu); 3168 3169 vmx_dr_enter_guest(vmxctx); 3170 3171 /* 3172 * Mark the EPT as active on this host CPU and invalidate 3173 * EPTP-tagged TLB entries if required. 
3174 */ 3175 vmx_pmap_activate(vmx, pmap); 3176 3177 vmx_run_trace(vcpu); 3178 rc = vmx_enter_guest(vmxctx, vmx, launched); 3179 3180 vmx_pmap_deactivate(vmx, pmap); 3181 vmx_dr_leave_guest(vmxctx); 3182 vmx_msr_guest_exit_tsc_aux(vmx, vcpu); 3183 3184 bare_lgdt(&gdtr); 3185 lidt(&idtr); 3186 lldt(ldt_sel); 3187 3188 /* Collect some information for VM exit processing */ 3189 vmexit->rip = rip = vmcs_guest_rip(); 3190 vmexit->inst_length = vmexit_instruction_length(); 3191 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason(); 3192 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification(); 3193 3194 /* Update 'nextrip' */ 3195 vcpu->state.nextrip = rip; 3196 3197 if (rc == VMX_GUEST_VMEXIT) { 3198 vmx_exit_handle_nmi(vcpu, vmexit); 3199 enable_intr(); 3200 handled = vmx_exit_process(vmx, vcpu, vmexit); 3201 } else { 3202 enable_intr(); 3203 vmx_exit_inst_error(vmxctx, rc, vmexit); 3204 } 3205 launched = 1; 3206 vmx_exit_trace(vcpu, rip, exit_reason, handled); 3207 rip = vmexit->rip; 3208 } while (handled); 3209 3210 /* 3211 * If a VM exit has been handled then the exitcode must be BOGUS 3212 * If a VM exit is not handled then the exitcode must not be BOGUS 3213 */ 3214 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) || 3215 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) { 3216 panic("Mismatch between handled (%d) and exitcode (%d)", 3217 handled, vmexit->exitcode); 3218 } 3219 3220 VCPU_CTR1(vm, vcpuid, "returning from vmx_run: exitcode %d", 3221 vmexit->exitcode); 3222 3223 VMCLEAR(vmcs); 3224 vmx_msr_guest_exit(vmx, vcpu); 3225 3226 return (0); 3227 } 3228 3229 static void 3230 vmx_vcpu_cleanup(void *vcpui) 3231 { 3232 struct vmx_vcpu *vcpu = vcpui; 3233 3234 vpid_free(vcpu->state.vpid); 3235 free(vcpu->pir_desc, M_VMX); 3236 free(vcpu->apic_page, M_VMX); 3237 free(vcpu->vmcs, M_VMX); 3238 free(vcpu, M_VMX); 3239 } 3240 3241 static void 3242 vmx_cleanup(void *vmi) 3243 { 3244 struct vmx *vmx = vmi; 3245 3246 if (virtual_interrupt_delivery) 3247 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 3248 3249 free(vmx->msr_bitmap, M_VMX); 3250 free(vmx, M_VMX); 3251 3252 return; 3253 } 3254 3255 static register_t * 3256 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 3257 { 3258 3259 switch (reg) { 3260 case VM_REG_GUEST_RAX: 3261 return (&vmxctx->guest_rax); 3262 case VM_REG_GUEST_RBX: 3263 return (&vmxctx->guest_rbx); 3264 case VM_REG_GUEST_RCX: 3265 return (&vmxctx->guest_rcx); 3266 case VM_REG_GUEST_RDX: 3267 return (&vmxctx->guest_rdx); 3268 case VM_REG_GUEST_RSI: 3269 return (&vmxctx->guest_rsi); 3270 case VM_REG_GUEST_RDI: 3271 return (&vmxctx->guest_rdi); 3272 case VM_REG_GUEST_RBP: 3273 return (&vmxctx->guest_rbp); 3274 case VM_REG_GUEST_R8: 3275 return (&vmxctx->guest_r8); 3276 case VM_REG_GUEST_R9: 3277 return (&vmxctx->guest_r9); 3278 case VM_REG_GUEST_R10: 3279 return (&vmxctx->guest_r10); 3280 case VM_REG_GUEST_R11: 3281 return (&vmxctx->guest_r11); 3282 case VM_REG_GUEST_R12: 3283 return (&vmxctx->guest_r12); 3284 case VM_REG_GUEST_R13: 3285 return (&vmxctx->guest_r13); 3286 case VM_REG_GUEST_R14: 3287 return (&vmxctx->guest_r14); 3288 case VM_REG_GUEST_R15: 3289 return (&vmxctx->guest_r15); 3290 case VM_REG_GUEST_CR2: 3291 return (&vmxctx->guest_cr2); 3292 case VM_REG_GUEST_DR0: 3293 return (&vmxctx->guest_dr0); 3294 case VM_REG_GUEST_DR1: 3295 return (&vmxctx->guest_dr1); 3296 case VM_REG_GUEST_DR2: 3297 return (&vmxctx->guest_dr2); 3298 case VM_REG_GUEST_DR3: 3299 return (&vmxctx->guest_dr3); 3300 case VM_REG_GUEST_DR6: 3301 return 
(&vmxctx->guest_dr6); 3302 default: 3303 break; 3304 } 3305 return (NULL); 3306 } 3307 3308 static int 3309 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval) 3310 { 3311 register_t *regp; 3312 3313 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 3314 *retval = *regp; 3315 return (0); 3316 } else 3317 return (EINVAL); 3318 } 3319 3320 static int 3321 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val) 3322 { 3323 register_t *regp; 3324 3325 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) { 3326 *regp = val; 3327 return (0); 3328 } else 3329 return (EINVAL); 3330 } 3331 3332 static int 3333 vmx_get_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t *retval) 3334 { 3335 uint64_t gi; 3336 int error; 3337 3338 error = vmcs_getreg(vcpu->vmcs, running, 3339 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi); 3340 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0; 3341 return (error); 3342 } 3343 3344 static int 3345 vmx_modify_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t val) 3346 { 3347 struct vmcs *vmcs; 3348 uint64_t gi; 3349 int error, ident; 3350 3351 /* 3352 * Forcing the vcpu into an interrupt shadow is not supported. 3353 */ 3354 if (val) { 3355 error = EINVAL; 3356 goto done; 3357 } 3358 3359 vmcs = vcpu->vmcs; 3360 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY); 3361 error = vmcs_getreg(vmcs, running, ident, &gi); 3362 if (error == 0) { 3363 gi &= ~HWINTR_BLOCKING; 3364 error = vmcs_setreg(vmcs, running, ident, gi); 3365 } 3366 done: 3367 VCPU_CTR2(vcpu->vmx->vm, vcpu->vcpuid, "Setting intr_shadow to %#lx %s", 3368 val, error ? "failed" : "succeeded"); 3369 return (error); 3370 } 3371 3372 static int 3373 vmx_shadow_reg(int reg) 3374 { 3375 int shreg; 3376 3377 shreg = -1; 3378 3379 switch (reg) { 3380 case VM_REG_GUEST_CR0: 3381 shreg = VMCS_CR0_SHADOW; 3382 break; 3383 case VM_REG_GUEST_CR4: 3384 shreg = VMCS_CR4_SHADOW; 3385 break; 3386 default: 3387 break; 3388 } 3389 3390 return (shreg); 3391 } 3392 3393 static int 3394 vmx_getreg(void *vcpui, int reg, uint64_t *retval) 3395 { 3396 int running, hostcpu; 3397 struct vmx_vcpu *vcpu = vcpui; 3398 struct vmx *vmx = vcpu->vmx; 3399 3400 running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu); 3401 if (running && hostcpu != curcpu) 3402 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), 3403 vcpu->vcpuid); 3404 3405 if (reg == VM_REG_GUEST_INTR_SHADOW) 3406 return (vmx_get_intr_shadow(vcpu, running, retval)); 3407 3408 if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0) 3409 return (0); 3410 3411 return (vmcs_getreg(vcpu->vmcs, running, reg, retval)); 3412 } 3413 3414 static int 3415 vmx_setreg(void *vcpui, int reg, uint64_t val) 3416 { 3417 int error, hostcpu, running, shadow; 3418 uint64_t ctls; 3419 pmap_t pmap; 3420 struct vmx_vcpu *vcpu = vcpui; 3421 struct vmx *vmx = vcpu->vmx; 3422 3423 running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu); 3424 if (running && hostcpu != curcpu) 3425 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), 3426 vcpu->vcpuid); 3427 3428 if (reg == VM_REG_GUEST_INTR_SHADOW) 3429 return (vmx_modify_intr_shadow(vcpu, running, val)); 3430 3431 if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0) 3432 return (0); 3433 3434 /* Do not permit user write access to VMCS fields by offset. 
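 * (Identifiers produced by VMCS_IDENT() have the high bit set, so
 * they show up here as negative 'reg' values and are rejected by
 * the check below.)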
*/ 3435 if (reg < 0) 3436 return (EINVAL); 3437 3438 error = vmcs_setreg(vcpu->vmcs, running, reg, val); 3439 3440 if (error == 0) { 3441 /* 3442 * If the "load EFER" VM-entry control is 1 then the 3443 * value of EFER.LMA must be identical to "IA-32e mode guest" 3444 * bit in the VM-entry control. 3445 */ 3446 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 && 3447 (reg == VM_REG_GUEST_EFER)) { 3448 vmcs_getreg(vcpu->vmcs, running, 3449 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls); 3450 if (val & EFER_LMA) 3451 ctls |= VM_ENTRY_GUEST_LMA; 3452 else 3453 ctls &= ~VM_ENTRY_GUEST_LMA; 3454 vmcs_setreg(vcpu->vmcs, running, 3455 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls); 3456 } 3457 3458 shadow = vmx_shadow_reg(reg); 3459 if (shadow > 0) { 3460 /* 3461 * Store the unmodified value in the shadow 3462 */ 3463 error = vmcs_setreg(vcpu->vmcs, running, 3464 VMCS_IDENT(shadow), val); 3465 } 3466 3467 if (reg == VM_REG_GUEST_CR3) { 3468 /* 3469 * Invalidate the guest vcpu's TLB mappings to emulate 3470 * the behavior of updating %cr3. 3471 * 3472 * XXX the processor retains global mappings when %cr3 3473 * is updated but vmx_invvpid() does not. 3474 */ 3475 pmap = vcpu->ctx.pmap; 3476 vmx_invvpid(vmx, vcpu, pmap, running); 3477 } 3478 } 3479 3480 return (error); 3481 } 3482 3483 static int 3484 vmx_getdesc(void *vcpui, int reg, struct seg_desc *desc) 3485 { 3486 int hostcpu, running; 3487 struct vmx_vcpu *vcpu = vcpui; 3488 struct vmx *vmx = vcpu->vmx; 3489 3490 running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu); 3491 if (running && hostcpu != curcpu) 3492 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), 3493 vcpu->vcpuid); 3494 3495 return (vmcs_getdesc(vcpu->vmcs, running, reg, desc)); 3496 } 3497 3498 static int 3499 vmx_setdesc(void *vcpui, int reg, struct seg_desc *desc) 3500 { 3501 int hostcpu, running; 3502 struct vmx_vcpu *vcpu = vcpui; 3503 struct vmx *vmx = vcpu->vmx; 3504 3505 running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu); 3506 if (running && hostcpu != curcpu) 3507 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), 3508 vcpu->vcpuid); 3509 3510 return (vmcs_setdesc(vcpu->vmcs, running, reg, desc)); 3511 } 3512 3513 static int 3514 vmx_getcap(void *vcpui, int type, int *retval) 3515 { 3516 struct vmx_vcpu *vcpu = vcpui; 3517 int vcap; 3518 int ret; 3519 3520 ret = ENOENT; 3521 3522 vcap = vcpu->cap.set; 3523 3524 switch (type) { 3525 case VM_CAP_HALT_EXIT: 3526 if (cap_halt_exit) 3527 ret = 0; 3528 break; 3529 case VM_CAP_PAUSE_EXIT: 3530 if (cap_pause_exit) 3531 ret = 0; 3532 break; 3533 case VM_CAP_MTRAP_EXIT: 3534 if (cap_monitor_trap) 3535 ret = 0; 3536 break; 3537 case VM_CAP_RDPID: 3538 if (cap_rdpid) 3539 ret = 0; 3540 break; 3541 case VM_CAP_RDTSCP: 3542 if (cap_rdtscp) 3543 ret = 0; 3544 break; 3545 case VM_CAP_UNRESTRICTED_GUEST: 3546 if (cap_unrestricted_guest) 3547 ret = 0; 3548 break; 3549 case VM_CAP_ENABLE_INVPCID: 3550 if (cap_invpcid) 3551 ret = 0; 3552 break; 3553 case VM_CAP_BPT_EXIT: 3554 case VM_CAP_IPI_EXIT: 3555 ret = 0; 3556 break; 3557 default: 3558 break; 3559 } 3560 3561 if (ret == 0) 3562 *retval = (vcap & (1 << type)) ? 
1 : 0; 3563 3564 return (ret); 3565 } 3566 3567 static int 3568 vmx_setcap(void *vcpui, int type, int val) 3569 { 3570 struct vmx_vcpu *vcpu = vcpui; 3571 struct vmcs *vmcs = vcpu->vmcs; 3572 struct vlapic *vlapic; 3573 uint32_t baseval; 3574 uint32_t *pptr; 3575 int error; 3576 int flag; 3577 int reg; 3578 int retval; 3579 3580 retval = ENOENT; 3581 pptr = NULL; 3582 3583 switch (type) { 3584 case VM_CAP_HALT_EXIT: 3585 if (cap_halt_exit) { 3586 retval = 0; 3587 pptr = &vcpu->cap.proc_ctls; 3588 baseval = *pptr; 3589 flag = PROCBASED_HLT_EXITING; 3590 reg = VMCS_PRI_PROC_BASED_CTLS; 3591 } 3592 break; 3593 case VM_CAP_MTRAP_EXIT: 3594 if (cap_monitor_trap) { 3595 retval = 0; 3596 pptr = &vcpu->cap.proc_ctls; 3597 baseval = *pptr; 3598 flag = PROCBASED_MTF; 3599 reg = VMCS_PRI_PROC_BASED_CTLS; 3600 } 3601 break; 3602 case VM_CAP_PAUSE_EXIT: 3603 if (cap_pause_exit) { 3604 retval = 0; 3605 pptr = &vcpu->cap.proc_ctls; 3606 baseval = *pptr; 3607 flag = PROCBASED_PAUSE_EXITING; 3608 reg = VMCS_PRI_PROC_BASED_CTLS; 3609 } 3610 break; 3611 case VM_CAP_RDPID: 3612 case VM_CAP_RDTSCP: 3613 if (cap_rdpid || cap_rdtscp) 3614 /* 3615 * Choose not to support enabling/disabling 3616 * RDPID/RDTSCP via libvmmapi since, as per the 3617 * discussion in vmx_modinit(), RDPID/RDTSCP are 3618 * either always enabled or always disabled. 3619 */ 3620 error = EOPNOTSUPP; 3621 break; 3622 case VM_CAP_UNRESTRICTED_GUEST: 3623 if (cap_unrestricted_guest) { 3624 retval = 0; 3625 pptr = &vcpu->cap.proc_ctls2; 3626 baseval = *pptr; 3627 flag = PROCBASED2_UNRESTRICTED_GUEST; 3628 reg = VMCS_SEC_PROC_BASED_CTLS; 3629 } 3630 break; 3631 case VM_CAP_ENABLE_INVPCID: 3632 if (cap_invpcid) { 3633 retval = 0; 3634 pptr = &vcpu->cap.proc_ctls2; 3635 baseval = *pptr; 3636 flag = PROCBASED2_ENABLE_INVPCID; 3637 reg = VMCS_SEC_PROC_BASED_CTLS; 3638 } 3639 break; 3640 case VM_CAP_BPT_EXIT: 3641 retval = 0; 3642 3643 /* Don't change the bitmap if we are tracing all exceptions. */ 3644 if (vcpu->cap.exc_bitmap != 0xffffffff) { 3645 pptr = &vcpu->cap.exc_bitmap; 3646 baseval = *pptr; 3647 flag = (1 << IDT_BP); 3648 reg = VMCS_EXCEPTION_BITMAP; 3649 } 3650 break; 3651 case VM_CAP_IPI_EXIT: 3652 retval = 0; 3653 3654 vlapic = vm_lapic(vcpu->vmx->vm, vcpu->vcpuid); 3655 vlapic->ipi_exit = val; 3656 break; 3657 default: 3658 break; 3659 } 3660 3661 if (retval) 3662 return (retval); 3663 3664 if (pptr != NULL) { 3665 if (val) { 3666 baseval |= flag; 3667 } else { 3668 baseval &= ~flag; 3669 } 3670 VMPTRLD(vmcs); 3671 error = vmwrite(reg, baseval); 3672 VMCLEAR(vmcs); 3673 3674 if (error) 3675 return (error); 3676 3677 /* 3678 * Update optional stored flags, and record 3679 * setting 3680 */ 3681 *pptr = baseval; 3682 } 3683 3684 if (val) { 3685 vcpu->cap.set |= (1 << type); 3686 } else { 3687 vcpu->cap.set &= ~(1 << type); 3688 } 3689 3690 return (0); 3691 } 3692 3693 static struct vmspace * 3694 vmx_vmspace_alloc(vm_offset_t min, vm_offset_t max) 3695 { 3696 return (ept_vmspace_alloc(min, max)); 3697 } 3698 3699 static void 3700 vmx_vmspace_free(struct vmspace *vmspace) 3701 { 3702 ept_vmspace_free(vmspace); 3703 } 3704 3705 struct vlapic_vtx { 3706 struct vlapic vlapic; 3707 struct pir_desc *pir_desc; 3708 struct vmx_vcpu *vcpu; 3709 u_int pending_prio; 3710 }; 3711 3712 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3713 3714 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \ 3715 do { \ 3716 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \ 3717 level ? 
"level" : "edge", vector); \ 3718 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \ 3719 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \ 3720 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \ 3721 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \ 3722 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\ 3723 } while (0) 3724 3725 /* 3726 * vlapic->ops handlers that utilize the APICv hardware assist described in 3727 * Chapter 29 of the Intel SDM. 3728 */ 3729 static int 3730 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level) 3731 { 3732 struct vlapic_vtx *vlapic_vtx; 3733 struct pir_desc *pir_desc; 3734 uint64_t mask; 3735 int idx, notify = 0; 3736 3737 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3738 pir_desc = vlapic_vtx->pir_desc; 3739 3740 /* 3741 * Keep track of interrupt requests in the PIR descriptor. This is 3742 * because the virtual APIC page pointed to by the VMCS cannot be 3743 * modified if the vcpu is running. 3744 */ 3745 idx = vector / 64; 3746 mask = 1UL << (vector % 64); 3747 atomic_set_long(&pir_desc->pir[idx], mask); 3748 3749 /* 3750 * A notification is required whenever the 'pending' bit makes a 3751 * transition from 0->1. 3752 * 3753 * Even if the 'pending' bit is already asserted, notification about 3754 * the incoming interrupt may still be necessary. For example, if a 3755 * vCPU is HLTed with a high PPR, a low priority interrupt would cause 3756 * the 0->1 'pending' transition with a notification, but the vCPU 3757 * would ignore the interrupt for the time being. The same vCPU would 3758 * need to then be notified if a high-priority interrupt arrived which 3759 * satisfied the PPR. 3760 * 3761 * The priorities of interrupts injected while 'pending' is asserted 3762 * are tracked in a custom bitfield 'pending_prio'. Should the 3763 * to-be-injected interrupt exceed the priorities already present, the 3764 * notification is sent. The priorities recorded in 'pending_prio' are 3765 * cleared whenever the 'pending' bit makes another 0->1 transition. 3766 */ 3767 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) { 3768 notify = 1; 3769 vlapic_vtx->pending_prio = 0; 3770 } else { 3771 const u_int old_prio = vlapic_vtx->pending_prio; 3772 const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT); 3773 3774 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) { 3775 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit); 3776 notify = 1; 3777 } 3778 } 3779 3780 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector, 3781 level, "vmx_set_intr_ready"); 3782 return (notify); 3783 } 3784 3785 static int 3786 vmx_pending_intr(struct vlapic *vlapic, int *vecptr) 3787 { 3788 struct vlapic_vtx *vlapic_vtx; 3789 struct pir_desc *pir_desc; 3790 struct LAPIC *lapic; 3791 uint64_t pending, pirval; 3792 uint32_t ppr, vpr; 3793 int i; 3794 3795 /* 3796 * This function is only expected to be called from the 'HLT' exit 3797 * handler which does not care about the vector that is pending. 3798 */ 3799 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL")); 3800 3801 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3802 pir_desc = vlapic_vtx->pir_desc; 3803 3804 pending = atomic_load_acq_long(&pir_desc->pending); 3805 if (!pending) { 3806 /* 3807 * While a virtual interrupt may have already been 3808 * processed the actual delivery maybe pending the 3809 * interruptibility of the guest. 
static int
vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t pending, pirval;
	uint32_t ppr, vpr;
	int i;

	/*
	 * This function is only expected to be called from the 'HLT' exit
	 * handler which does not care about the vector that is pending.
	 */
	KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;

	pending = atomic_load_acq_long(&pir_desc->pending);
	if (!pending) {
		/*
		 * While a virtual interrupt may have already been
		 * processed, the actual delivery may still be pending on
		 * the interruptibility of the guest. Recognize a pending
		 * interrupt by reevaluating virtual interrupts
		 * following Section 29.2.1 in the Intel SDM Volume 3.
		 */
		struct vm_exit *vmexit;
		uint8_t rvi, ppr;

		vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
		KASSERT(vmexit->exitcode == VM_EXITCODE_HLT,
		    ("vmx_pending_intr: exitcode not 'HLT'"));
		rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT;
		lapic = vlapic->apic_page;
		ppr = lapic->ppr & APIC_TPR_INT;
		if (rvi > ppr) {
			return (1);
		}

		return (0);
	}

	/*
	 * If there is an interrupt pending then it will be recognized only
	 * if its priority is greater than the processor priority.
	 *
	 * Special case: if the processor priority is zero then any pending
	 * interrupt will be recognized.
	 */
	lapic = vlapic->apic_page;
	ppr = lapic->ppr & APIC_TPR_INT;
	if (ppr == 0)
		return (1);

	VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
	    lapic->ppr);

	vpr = 0;
	for (i = 3; i >= 0; i--) {
		pirval = pir_desc->pir[i];
		if (pirval != 0) {
			vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT;
			break;
		}
	}

	/*
	 * If the highest-priority pending interrupt falls short of the
	 * processor priority of this vCPU, ensure that 'pending_prio' does not
	 * have any stale bits which would preclude a higher-priority interrupt
	 * from incurring a notification later.
	 */
	if (vpr <= ppr) {
		const u_int prio_bit = VPR_PRIO_BIT(vpr);
		const u_int old = vlapic_vtx->pending_prio;

		if (old > prio_bit && (old & prio_bit) == 0) {
			vlapic_vtx->pending_prio = prio_bit;
		}
		return (0);
	}
	return (1);
}
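/*
 * Illustrative sketch (not used by the driver; the helper name is
 * hypothetical): how vmx_set_intr_ready() above locates a vector within the
 * 256-bit PIR.  Vector 0x91 (145), for instance, lands in pir[2] at bit 17.
 */
static __inline void
pir_locate_example(int vector, int *idxp, uint64_t *maskp)
{

	*idxp = vector / 64;		/* which 64-bit PIR word */
	*maskp = 1UL << (vector % 64);	/* bit within that word */
}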
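/*
 * Illustrative sketch (not used by the driver; the helper name is
 * hypothetical): the recognition test applied by vmx_pending_intr() above
 * when the 'pending' bit is clear.  A requested vector is only recognized
 * when its priority class (bits 7:4, the same field VPR_PRIO_BIT() keys on)
 * exceeds the processor priority.
 */
static __inline int
rvi_recognized_example(uint8_t intr_status, uint8_t ppr)
{

	return ((intr_status & APIC_TPR_INT) > (ppr & APIC_TPR_INT));
}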
static void
vmx_intr_accepted(struct vlapic *vlapic, int vector)
{

	panic("vmx_intr_accepted: not expected to be called");
}

static void
vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
{
	struct vlapic_vtx *vlapic_vtx;
	struct vmcs *vmcs;
	uint64_t mask, val;

	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
	KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
	    ("vmx_set_tmr: vcpu cannot be running"));

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vmcs = vlapic_vtx->vcpu->vmcs;
	mask = 1UL << (vector % 64);

	VMPTRLD(vmcs);
	val = vmcs_read(VMCS_EOI_EXIT(vector));
	if (level)
		val |= mask;
	else
		val &= ~mask;
	vmcs_write(VMCS_EOI_EXIT(vector), val);
	VMCLEAR(vmcs);
}

static void
vmx_enable_x2apic_mode_ts(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	struct vmx_vcpu *vcpu;
	struct vmcs *vmcs;
	uint32_t proc_ctls;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vcpu = vlapic_vtx->vcpu;
	vmcs = vcpu->vmcs;

	proc_ctls = vcpu->cap.proc_ctls;
	proc_ctls &= ~PROCBASED_USE_TPR_SHADOW;
	proc_ctls |= PROCBASED_CR8_LOAD_EXITING;
	proc_ctls |= PROCBASED_CR8_STORE_EXITING;
	vcpu->cap.proc_ctls = proc_ctls;

	VMPTRLD(vmcs);
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
	VMCLEAR(vmcs);
}

static void
vmx_enable_x2apic_mode_vid(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	struct vmx *vmx;
	struct vmx_vcpu *vcpu;
	struct vmcs *vmcs;
	uint32_t proc_ctls2;
	int error __diagused;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vcpu = vlapic_vtx->vcpu;
	vmx = vcpu->vmx;
	vmcs = vcpu->vmcs;

	proc_ctls2 = vcpu->cap.proc_ctls2;
	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
	    ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));

	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
	vcpu->cap.proc_ctls2 = proc_ctls2;

	VMPTRLD(vmcs);
	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
	VMCLEAR(vmcs);

	if (vlapic->vcpuid == 0) {
		/*
		 * The nested page table mappings are shared by all vcpus
		 * so unmap the APIC access page just once.
		 */
		error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
		KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
		    __func__, error));

		/*
		 * The MSR bitmap is shared by all vcpus so modify it only
		 * once in the context of vcpu 0.
		 */
		error = vmx_allow_x2apic_msrs(vmx);
		KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
		    __func__, error));
	}
}
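/*
 * Illustrative sketch (not used by the driver; the helper name is
 * hypothetical): the control-bit swap performed by
 * vmx_enable_x2apic_mode_vid() above, shown in isolation.  Once the guest
 * switches to x2APIC mode its APIC is reached through MSRs, so APIC-access
 * page virtualization is traded for x2APIC MSR virtualization.
 */
static __inline uint32_t
x2apic_proc_ctls2_example(uint32_t proc_ctls2)
{

	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
	return (proc_ctls2);
}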
static void
vmx_post_intr(struct vlapic *vlapic, int hostcpu)
{

	ipi_cpu(hostcpu, pirvec);
}

/*
 * Transfer the pending interrupts in the PIR descriptor to the IRR
 * in the virtual APIC page.
 */
static void
vmx_inject_pir(struct vlapic *vlapic)
{
	struct vlapic_vtx *vlapic_vtx;
	struct pir_desc *pir_desc;
	struct LAPIC *lapic;
	uint64_t val, pirval;
	int rvi, pirbase = -1;
	uint16_t intr_status_old, intr_status_new;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	pir_desc = vlapic_vtx->pir_desc;
	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
		VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
		    "no posted interrupt pending");
		return;
	}

	pirval = 0;
	pirbase = -1;
	lapic = vlapic->apic_page;

	val = atomic_readandclear_long(&pir_desc->pir[0]);
	if (val != 0) {
		lapic->irr0 |= val;
		lapic->irr1 |= val >> 32;
		pirbase = 0;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[1]);
	if (val != 0) {
		lapic->irr2 |= val;
		lapic->irr3 |= val >> 32;
		pirbase = 64;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[2]);
	if (val != 0) {
		lapic->irr4 |= val;
		lapic->irr5 |= val >> 32;
		pirbase = 128;
		pirval = val;
	}

	val = atomic_readandclear_long(&pir_desc->pir[3]);
	if (val != 0) {
		lapic->irr6 |= val;
		lapic->irr7 |= val >> 32;
		pirbase = 192;
		pirval = val;
	}

	VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");

	/*
	 * Update RVI so the processor can evaluate pending virtual
	 * interrupts on VM-entry.
	 *
	 * It is possible for pirval to be 0 here, even though the
	 * pending bit has been set. The scenario is:
	 * CPU-Y is sending a posted interrupt to CPU-X, which
	 * is running a guest and processing posted interrupts in h/w.
	 * CPU-X will eventually exit and the state seen in s/w is
	 * the pending bit set, but no PIR bits set.
	 *
	 *      CPU-X                      CPU-Y
	 *   (vm running)                (host running)
	 *
	 *   rx posted interrupt
	 *   CLEAR pending bit
	 *                                SET PIR bit
	 *   READ/CLEAR PIR bits
	 *                                SET pending bit
	 *   (vm exit)
	 *   pending bit set, PIR 0
	 */
	if (pirval != 0) {
		rvi = pirbase + flsl(pirval) - 1;
		intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		intr_status_new = (intr_status_old & 0xFF00) | rvi;
		if (intr_status_new > intr_status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
			VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
			    "guest_intr_status changed from 0x%04x to 0x%04x",
			    intr_status_old, intr_status_new);
		}
	}
}
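/*
 * Illustrative sketch (not used by the driver; the helper name is
 * hypothetical): the highest pending vector is derived from the four 64-bit
 * PIR words the same way vmx_inject_pir() above computes
 * pirbase + flsl(pirval) - 1.
 */
static __inline int
pir_highest_vector_example(const uint64_t pir[4])
{
	int i;

	for (i = 3; i >= 0; i--) {
		if (pir[i] != 0)
			return (i * 64 + flsl(pir[i]) - 1);
	}
	return (-1);	/* nothing pending */
}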
static struct vlapic *
vmx_vlapic_init(void *vcpui)
{
	struct vmx *vmx;
	struct vmx_vcpu *vcpu;
	struct vlapic *vlapic;
	struct vlapic_vtx *vlapic_vtx;

	vcpu = vcpui;
	vmx = vcpu->vmx;

	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = vmx->vm;
	vlapic->vcpuid = vcpu->vcpuid;
	vlapic->apic_page = (struct LAPIC *)vcpu->apic_page;

	vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vlapic_vtx->pir_desc = vcpu->pir_desc;
	vlapic_vtx->vcpu = vcpu;

	if (tpr_shadowing) {
		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts;
	}

	if (virtual_interrupt_delivery) {
		vlapic->ops.set_intr_ready = vmx_set_intr_ready;
		vlapic->ops.pending_intr = vmx_pending_intr;
		vlapic->ops.intr_accepted = vmx_intr_accepted;
		vlapic->ops.set_tmr = vmx_set_tmr;
		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid;
	}

	if (posted_interrupts)
		vlapic->ops.post_intr = vmx_post_intr;

	vlapic_init(vlapic);

	return (vlapic);
}

static void
vmx_vlapic_cleanup(struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_VLAPIC);
}

#ifdef BHYVE_SNAPSHOT
static int
vmx_snapshot(void *vmi, struct vm_snapshot_meta *meta)
{
	return (0);
}

static int
vmx_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
{
	struct vmcs *vmcs;
	struct vmx *vmx;
	struct vmx_vcpu *vcpu;
	struct vmxctx *vmxctx;
	int err, run, hostcpu;

	err = 0;
	vcpu = vcpui;
	vmx = vcpu->vmx;
	vmcs = vcpu->vmcs;

	run = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
	if (run && hostcpu != curcpu) {
		printf("%s: %s%d is running", __func__, vm_name(vmx->vm),
		    vcpu->vcpuid);
		return (EINVAL);
	}

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR0, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR3, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CR4, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_DR7, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RSP, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RIP, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_RFLAGS, meta);

	/* Guest segments */
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_ES, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_ES, meta);

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_CS, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_CS, meta);

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_SS, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_SS, meta);

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_DS, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_DS, meta);

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_FS, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_FS, meta);

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_GS, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_GS, meta);

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_TR, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_TR, meta);

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_LDTR, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_LDTR, meta);

	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_EFER, meta);

	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_IDTR, meta);
	err += vmcs_snapshot_desc(vmcs, run, VM_REG_GUEST_GDTR, meta);

	/* Guest page tables */
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE0, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE1, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE2, meta);
	err += vmcs_snapshot_reg(vmcs, run, VM_REG_GUEST_PDPTE3, meta);

	/* Other guest state */
	err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_CS, meta);
	err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_ESP, meta);
	err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_IA32_SYSENTER_EIP, meta);
	err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_INTERRUPTIBILITY, meta);
	err += vmcs_snapshot_any(vmcs, run, VMCS_GUEST_ACTIVITY, meta);
	err += vmcs_snapshot_any(vmcs, run, VMCS_ENTRY_CTLS, meta);
	err += vmcs_snapshot_any(vmcs, run, VMCS_EXIT_CTLS, meta);
	if (err != 0)
		goto done;

	SNAPSHOT_BUF_OR_LEAVE(vcpu->guest_msrs,
	    sizeof(vcpu->guest_msrs), meta, err, done);

	vmxctx = &vcpu->ctx;
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdi, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rsi, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rdx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rcx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r8, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r9, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rax, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbx, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_rbp, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r10, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r11, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r12, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r13, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r14, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_r15, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_cr2, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr0, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr1, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr2, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr3, meta, err, done);
	SNAPSHOT_VAR_OR_LEAVE(vmxctx->guest_dr6, meta, err, done);

done:
	return (err);
}
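/*
 * Illustrative sketch (not used by the driver; the helper and callback names
 * are hypothetical): the pattern the snapshot/restore code relies on.  VMCS
 * accesses are only valid while the VMCS is current, so when the vcpu is not
 * running the VMCS must be loaded and cleared around the access; when it is
 * running on the current host cpu, the VMCS is already loaded.
 */
static __inline int
vmcs_access_example(struct vmcs *vmcs, int running,
    int (*access)(struct vmcs *))
{
	int error;

	if (!running)
		VMPTRLD(vmcs);
	error = (*access)(vmcs);
	if (!running)
		VMCLEAR(vmcs);
	return (error);
}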
static int
vmx_restore_tsc(void *vcpui, uint64_t offset)
{
	struct vmx_vcpu *vcpu = vcpui;
	struct vmcs *vmcs;
	struct vmx *vmx;
	int error, running, hostcpu;

	vmx = vcpu->vmx;
	vmcs = vcpu->vmcs;

	running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
	if (running && hostcpu != curcpu) {
		printf("%s: %s%d is running", __func__, vm_name(vmx->vm),
		    vcpu->vcpuid);
		return (EINVAL);
	}

	if (!running)
		VMPTRLD(vmcs);

	error = vmx_set_tsc_offset(vmx, vcpu, offset);

	if (!running)
		VMCLEAR(vmcs);
	return (error);
}
#endif

const struct vmm_ops vmm_ops_intel = {
	.modinit	= vmx_modinit,
	.modcleanup	= vmx_modcleanup,
	.modresume	= vmx_modresume,
	.init		= vmx_init,
	.run		= vmx_run,
	.cleanup	= vmx_cleanup,
	.vcpu_init	= vmx_vcpu_init,
	.vcpu_cleanup	= vmx_vcpu_cleanup,
	.getreg		= vmx_getreg,
	.setreg		= vmx_setreg,
	.getdesc	= vmx_getdesc,
	.setdesc	= vmx_setdesc,
	.getcap		= vmx_getcap,
	.setcap		= vmx_setcap,
	.vmspace_alloc	= vmx_vmspace_alloc,
	.vmspace_free	= vmx_vmspace_free,
	.vlapic_init	= vmx_vlapic_init,
	.vlapic_cleanup	= vmx_vlapic_cleanup,
#ifdef BHYVE_SNAPSHOT
	.snapshot	= vmx_snapshot,
	.vcpu_snapshot	= vmx_vcpu_snapshot,
	.restore_tsc	= vmx_restore_tsc,
#endif
};
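/*
 * Illustrative sketch (not part of the driver; the function below is
 * hypothetical): consumers are expected to reach the Intel backend through
 * the vmm_ops_intel table rather than by calling the static vmx_*()
 * functions directly, e.g. enabling HLT exits for a given vcpu cookie.
 */
static __inline int
vmm_ops_setcap_example(void *vcpui)
{

	return (vmm_ops_intel.setcap(vcpui, VM_CAP_HALT_EXIT, 1));
}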