/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 * Copyright 2022 MNX Cloud, Inc.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
#include <sys/archsystm.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>
#include <sys/vmm_vm.h>
#include <sys/vmm_kernel.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "vmx_controls.h"

#define	PINBASED_CTLS_ONE_SETTING		\
	(PINBASED_EXTINT_EXITING	|	\
	PINBASED_NMI_EXITING		|	\
	PINBASED_VIRTUAL_NMI)
#define	PINBASED_CTLS_ZERO_SETTING	0

#define	PROCBASED_CTLS_WINDOW_SETTING		\
	(PROCBASED_INT_WINDOW_EXITING	|	\
	PROCBASED_NMI_WINDOW_EXITING)

/*
 * Distinct from FreeBSD bhyve, we consider several additional proc-based
 * controls necessary:
 * - TSC offsetting
 * - HLT exiting
 */
#define	PROCBASED_CTLS_ONE_SETTING		\
	(PROCBASED_SECONDARY_CONTROLS	|	\
	PROCBASED_TSC_OFFSET		|	\
	PROCBASED_HLT_EXITING		|	\
	PROCBASED_MWAIT_EXITING		|	\
	PROCBASED_MONITOR_EXITING	|	\
	PROCBASED_IO_EXITING		|	\
	PROCBASED_MSR_BITMAPS		|	\
	PROCBASED_CTLS_WINDOW_SETTING	|	\
	PROCBASED_CR8_LOAD_EXITING	|	\
	PROCBASED_CR8_STORE_EXITING)

#define	PROCBASED_CTLS_ZERO_SETTING		\
	(PROCBASED_CR3_LOAD_EXITING	|	\
	PROCBASED_CR3_STORE_EXITING	|	\
	PROCBASED_IO_BITMAPS)
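/*
 * A note on the ONE/ZERO naming used throughout these definitions: roughly
 * speaking, each *_ONE_SETTING names control bits which must be set, and each
 * *_ZERO_SETTING names bits which must be clear.  vmx_set_ctlreg() checks both
 * against the allowed-0/allowed-1 settings reported by the corresponding
 * IA32_VMX_* capability MSR (allowed-0 in the low dword, allowed-1 in the
 * high dword) and fails if the processor cannot honor a requested setting.
 */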
/*
 * EPT and Unrestricted Guest are considered necessities.  The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
 */
#define	PROCBASED_CTLS2_ONE_SETTING		\
	(PROCBASED2_ENABLE_EPT		|	\
	PROCBASED2_UNRESTRICTED_GUEST)
#define	PROCBASED_CTLS2_ZERO_SETTING	0

#define	VM_EXIT_CTLS_ONE_SETTING		\
	(VM_EXIT_SAVE_DEBUG_CONTROLS	|	\
	VM_EXIT_HOST_LMA		|	\
	VM_EXIT_LOAD_PAT		|	\
	VM_EXIT_SAVE_EFER		|	\
	VM_EXIT_LOAD_EFER		|	\
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define	VM_EXIT_CTLS_ZERO_SETTING	0

#define	VM_ENTRY_CTLS_ONE_SETTING		\
	(VM_ENTRY_LOAD_DEBUG_CONTROLS	|	\
	VM_ENTRY_LOAD_EFER)

#define	VM_ENTRY_CTLS_ZERO_SETTING		\
	(VM_ENTRY_INTO_SMM		|	\
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

/*
 * Cover the EPT capabilities used by bhyve at present:
 * - 4-level page walks
 * - write-back memory type
 * - INVEPT operations (all types)
 * - INVVPID operations (single-context only)
 */
#define	EPT_CAPS_REQUIRED			\
	(IA32_VMX_EPT_VPID_PWL4		|	\
	IA32_VMX_EPT_VPID_TYPE_WB	|	\
	IA32_VMX_EPT_VPID_INVEPT	|	\
	IA32_VMX_EPT_VPID_INVEPT_SINGLE	|	\
	IA32_VMX_EPT_VPID_INVEPT_ALL	|	\
	IA32_VMX_EPT_VPID_INVVPID	|	\
	IA32_VMX_EPT_VPID_INVVPID_SINGLE)

#define	HANDLED		1
#define	UNHANDLED	0

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;

static uint64_t cr4_ones_mask, cr4_zeros_mask;

static int vmx_initialized;

/*
 * Optional capabilities
 */

/* PAUSE triggers a VM-exit */
static int cap_pause_exit;

/* WBINVD triggers a VM-exit */
static int cap_wbinvd_exit;

/* Monitor trap flag */
static int cap_monitor_trap;

/* Guests are allowed to use INVPCID */
static int cap_invpcid;

/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;

/* APICv posted interrupt vector */
static int pirvec = -1;

static uint_t vpid_alloc_failed;

int guest_l1d_flush;
int guest_l1d_flush_sw;

/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
	uint32_t	index;
	uint32_t	reserved;
	uint64_t	val;
};

static struct msr_entry msr_load_list[1] __aligned(16);
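/*
 * The layout above mirrors the VM-entry MSR-load area format from the SDM:
 * each 16-byte entry is { MSR index, reserved, value }.  With the single-entry
 * list used here (e.g. to load IA32_FLUSH_CMD on entry, on the FreeBSD L1D
 * flush path), the VMCS is pointed at msr_load_list with a count of 1; see
 * the VMCS_ENTRY_MSR_LOAD writes in vmx_vminit().
 */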
/*
 * The definitions of SDT probes for VMX.
 */

/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* END CSTYLED */

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static void vmx_apply_tsc_adjust(struct vmx *, int);
static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);
static void
vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid)
{
	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ID);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR);

	for (uint_t i = 0; i < 8; i++) {
		guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i);
	}

	guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR);

	/*
	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI);
}

static ulong_t
vmx_fix_cr0(ulong_t cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr0 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr0(uint64_t cr0, uint64_t shadow)
{
	return ((cr0 & ~cr0_ones_mask) |
	    (shadow & (cr0_zeros_mask | cr0_ones_mask)));
}

static ulong_t
vmx_fix_cr4(ulong_t cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr4 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr4(uint64_t cr4, uint64_t shadow)
{
	return ((cr4 & ~cr4_ones_mask) |
	    (shadow & (cr4_zeros_mask | cr4_ones_mask)));
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		hma_vmx_vpid_free((uint16_t)vpid);
}
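/*
 * A sketch of the VPID number space as carved up here: VPID 0 is reserved by
 * the hardware for host translations (and is also what all vCPUs use when the
 * enable-VPID control is absent), VPIDs [1, VM_MAXCPU] serve as a shared
 * fallback pool, and VPIDs above VM_MAXCPU are allocated system-wide-unique
 * via hma_vmx_vpid_alloc().
 */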
static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;

		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static int
vmx_cleanup(void)
{
	/* This is taken care of by the hma registration */
	return (0);
}

static void
vmx_restore(void)
{
	/* No-op on illumos */
}

static int
vmx_init(void)
{
	int error;
	uint64_t fixed0, fixed1;
	uint32_t tmp;
	enum vmx_caps avail_caps = VMX_CAP_NONE;

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/*
	 * Clear interrupt-window/NMI-window exiting from the default
	 * proc-based controls.  They are set and cleared based on runtime vCPU
	 * events.
	 */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		    "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID,
	    0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_wbinvd_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_WBINVD_EXITING, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);
	/*
	 * Check for APIC virtualization capabilities:
	 * - TPR shadowing
	 * - Full APICv (with or without x2APIC support)
	 * - Posted interrupt handling
	 */
	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
		avail_caps |= VMX_CAP_TPR_SHADOW;

		const uint32_t apicv_bits =
		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
			avail_caps |= VMX_CAP_APICV;

			/*
			 * It may make sense in the future to differentiate
			 * hardware (or software) configurations with APICv but
			 * no support for accelerating x2APIC mode.
			 */
			avail_caps |= VMX_CAP_APICV_X2APIC;

			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			    MSR_VMX_TRUE_PINBASED_CTLS,
			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
			if (error == 0) {
				/*
				 * If the PSM-provided interfaces for requesting
				 * and using a PIR IPI vector are present, use
				 * them for posted interrupts.
				 */
				if (psm_get_pir_ipivect != NULL &&
				    psm_send_pir_ipi != NULL) {
					pirvec = psm_get_pir_ipivect();
					avail_caps |= VMX_CAP_APICV_PIR;
				}
			}
		}
	}

	/*
	 * Check for necessary EPT capabilities
	 *
	 * TODO: Properly handle when IA32_VMX_EPT_VPID_HW_AD is missing and the
	 * hypervisor intends to utilize dirty page tracking.
	 */
	uint64_t ept_caps = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
	if ((ept_caps & EPT_CAPS_REQUIRED) != EPT_CAPS_REQUIRED) {
		cmn_err(CE_WARN, "!Inadequate EPT capabilities: %lx", ept_caps);
		return (EINVAL);
	}

#ifdef __FreeBSD__
	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
	 * available.  Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
#else
	/* L1D flushing is taken care of by smt_acquire() and friends */
	guest_l1d_flush = 0;
#endif /* __FreeBSD__ */

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;
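	/*
	 * Illustration of the derivation above: a bit which must be 1 reads
	 * as 1 in both FIXED0 and FIXED1, landing it in the ones mask; a bit
	 * which must be 0 reads as 0 in both, landing its complement in the
	 * zeros mask.  On typical VMX-capable hardware, e.g. with
	 * CR0_FIXED0 = 0x80000021 and CR0_FIXED1 = 0xffffffff, the ones mask
	 * is PG|NE|PE and the zeros mask covers only the reserved upper half
	 * of %cr0 (before the adjustments below).
	 */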
	/*
	 * Since Unrestricted Guest was already verified present, CR0_PE and
	 * CR0_PG are allowed to be set to zero in VMX non-root operation
	 */
	cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vmx_msr_init();

	vmx_capabilities = avail_caps;
	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	VERIFY(vector >= 32 && vector <= 255);
	vmx_call_isr(vector - 32);
}

static void *
vmx_vminit(struct vm *vm)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, datasel;
	struct vmx *vmx;
	uint32_t exc_bitmap;
	uint16_t maxcpus;
	uint32_t proc_ctls, proc2_ctls, pin_ctls;
	uint64_t apic_access_pa = UINT64_MAX;

	vmx = kmem_zalloc(sizeof (struct vmx), KM_SLEEP);
	VERIFY3U((uintptr_t)vmx & PAGE_MASK, ==, 0);

	vmx->vm = vm;
	vmx->eptp = vmspace_table_root(vm_get_vmspace(vm));

	/*
	 * Clean up EP4TA-tagged guest-physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings.  So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	hma_vmx_invept_allcpus((uintptr_t)vmx->eptp);

	vmx_msr_bitmap_initialize(vmx);

	vpid_alloc(vpid, VM_MAXCPU);

	/* Grab the established defaults */
	proc_ctls = procbased_ctls;
	proc2_ctls = procbased_ctls2;
	pin_ctls = pinbased_ctls;
	/* For now, default to the available capabilities */
	vmx->vmx_caps = vmx_capabilities;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));

		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

		/*
		 * Allocate a page of memory to back the APIC access address
		 * for when APICv features are in use.  Guest MMIO accesses
		 * should never actually reach this page, but rather be
		 * intercepted.
		 */
		vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP);
		VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0);
		apic_access_pa = vtophys(vmx->apic_access_page);

		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    apic_access_pa);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));

		pin_ctls |= PINBASED_POSTED_INTERRUPT;
	}

	/* Reflect any enabled defaults in the cap set */
	int cap_defaults = 0;
	if ((proc_ctls & PROCBASED_HLT_EXITING) != 0) {
		cap_defaults |= (1 << VM_CAP_HALT_EXIT);
	}
	if ((proc_ctls & PROCBASED_PAUSE_EXITING) != 0) {
		cap_defaults |= (1 << VM_CAP_PAUSE_EXIT);
	}
	if ((proc_ctls & PROCBASED_MTF) != 0) {
		cap_defaults |= (1 << VM_CAP_MTRAP_EXIT);
	}
	if ((proc2_ctls & PROCBASED2_ENABLE_INVPCID) != 0) {
		cap_defaults |= (1 << VM_CAP_ENABLE_INVPCID);
	}

	maxcpus = vm_get_maxcpus(vm);
	datasel = vmm_get_host_datasel();
	for (i = 0; i < maxcpus; i++) {
		/*
		 * Cache physical address lookups for various components which
		 * may be required inside the critical_enter() section implied
		 * by VMPTRLD() below.
		 */
		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]);
		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);

		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);

		vmx_msr_guest_init(vmx, i);

		vmcs_load(vmx->vmcs_pa[i]);

		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());

		/* Load the control registers */
		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);

		/* Load the segment selectors */
		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());

		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);

		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

		/*
		 * Configure host sysenter MSRs to be restored on VM exit.
		 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
		 * vmx_run.
		 */
		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
		    rdmsr(MSR_SYSENTER_EIP_MSR));

		/* instruction pointer */
		vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);

		/* link pointer */
		vmcs_write(VMCS_LINK_POINTER, ~0);

		vmcs_write(VMCS_EPTP, vmx->eptp);
		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);

		uint32_t use_proc2_ctls = proc2_ctls;
		if (cap_wbinvd_exit && vcpu_trap_wbinvd(vm, i) != 0)
			use_proc2_ctls |= PROCBASED2_WBINVD_EXITING;
		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, use_proc2_ctls);

		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
		vmcs_write(VMCS_VPID, vpid[i]);

		if (guest_l1d_flush && !guest_l1d_flush_sw) {
			vmcs_write(VMCS_ENTRY_MSR_LOAD,
			    vtophys(&msr_load_list[0]));
			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
			    nitems(msr_load_list));
			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
		}

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);

		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
		}

		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
			vmcs_write(VMCS_APIC_ACCESS, apic_access_pa);
			vmcs_write(VMCS_EOI_EXIT0, 0);
			vmcs_write(VMCS_EOI_EXIT1, 0);
			vmcs_write(VMCS_EOI_EXIT2, 0);
			vmcs_write(VMCS_EOI_EXIT3, 0);
		}
		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vmcs_write(VMCS_PIR_VECTOR, pirvec);
			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
		}

		/*
		 * Set up the CR0/4 masks and configure the read shadow state
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
		vmcs_write(VMCS_CR4_SHADOW, 0);
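		/*
		 * A worked example of the mask/shadow interplay configured
		 * above, assuming a ones mask of just CR0_NE: a guest mov of
		 * CR0_PE (0x1) to %cr0 (see vmx_emulate_cr0_access()) stores
		 * 0x1 in the read shadow while the live guest %cr0 becomes
		 * NE|PE (0x21).  Guest reads of mask-covered bits return the
		 * shadow value, so the guest never observes the forced NE.
		 */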
		vmcs_clear(vmx->vmcs_pa[i]);

		vmx->cap[i].set = cap_defaults;
		vmx->cap[i].proc_ctls = proc_ctls;
		vmx->cap[i].proc_ctls2 = proc2_ctls;
		vmx->cap[i].exc_bitmap = exc_bitmap;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];
	}

	return (vmx);
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof (struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	DTRACE_PROBE3(vmx__invvpid, uint64_t, type, uint16_t, desc.vpid,
	    uint64_t, desc.linear_addr);

	__asm __volatile("invvpid %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error) {
		panic("invvpid error %d", error);
	}
}
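/*
 * For reference, the only invocation pattern used in this file is a
 * single-context invalidation, e.g.:
 *
 *	struct invvpid_desc desc = { .vpid = vpid };
 *	invvpid(INVVPID_TYPE_SINGLE_CONTEXT, desc);
 *
 * which drops all TLB entries tagged with 'vpid' on the current CPU, as
 * vmx_invvpid() below does.
 */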
/*
 * Invalidate guest mappings identified by the vCPU's VPID from the TLB.
 *
 * This is effectively a flush of the guest TLB, removing only "combined
 * mappings" (to use the VMX parlance).  Actions which modify the EPT structures
 * for the instance (such as unmapping GPAs) would require an 'invept' flush.
 */
static void
vmx_invvpid(struct vmx *vmx, int vcpu, int running)
{
	struct vmxstate *vmxstate;
	struct vmspace *vms;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0) {
		return;
	}

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * This is done when a vCPU moves between host CPUs, where there may be
	 * stale TLB entries for this VPID on the target, or if emulated actions
	 * on the vCPU have incurred an explicit TLB flush.
	 */
	vms = vm_get_vmspace(vmx->vm);
	if (vmspace_table_gen(vms) == vmx->eptgen[curcpu]) {
		struct invvpid_desc invvpid_desc = {
			.vpid = vmxstate->vpid,
			.linear_addr = 0,
			._res1 = 0,
			._res2 = 0,
		};

		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The INVVPID can be skipped if an INVEPT is going to be
		 * performed before entering the guest.  The INVEPT will
		 * invalidate combined mappings for the EP4TA associated with
		 * this guest, in all VPIDs.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static __inline void
invept(uint64_t type, uint64_t eptp)
{
	int error;
	struct invept_desc {
		uint64_t eptp;
		uint64_t _resv;
	} desc = { eptp, 0 };

	DTRACE_PROBE2(vmx__invept, uint64_t, type, uint64_t, eptp);

	__asm __volatile("invept %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error != 0) {
		panic("invept error %d", error);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer.  As it points to the kernel
	 * stack of each thread, the correct value must be maintained for every
	 * trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT address */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, 1);
}

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
		/* Enable interrupt window exiting */
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
	    ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));

	/* Disable interrupt window exiting */
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}
/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs.  This is required even if the guest has not set a TSC
 * offset, since a vCPU inherits the TSC offset of whatever physical CPU it
 * has migrated onto.  Without this mitigation, un-synched host TSCs will
 * convey the appearance of TSC time-travel to the guest as its vCPUs migrate.
 */
static void
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
{
	const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);

	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);

	if (vmx->tsc_offset_active[vcpu] != offset) {
		vmcs_write(VMCS_TSC_OFFSET, offset);
		vmx->tsc_offset_active[vcpu] = offset;
	}
}

CTASSERT(VMCS_INTR_T_HWINTR == VM_INTINFO_HWINTR);
CTASSERT(VMCS_INTR_T_NMI == VM_INTINFO_NMI);
CTASSERT(VMCS_INTR_T_HWEXCEPTION == VM_INTINFO_HWEXCP);
CTASSERT(VMCS_INTR_T_SWINTR == VM_INTINFO_SWINTR);
CTASSERT(VMCS_INTR_T_PRIV_SWEXCEPTION == VM_INTINFO_RESV5);
CTASSERT(VMCS_INTR_T_SWEXCEPTION == VM_INTINFO_RESV6);
CTASSERT(VMCS_IDT_VEC_ERRCODE_VALID == VM_INTINFO_DEL_ERRCODE);
CTASSERT(VMCS_INTR_T_MASK == VM_INTINFO_MASK_TYPE);

static uint64_t
vmx_idtvec_to_intinfo(uint32_t info)
{
	ASSERT(info & VMCS_IDT_VEC_VALID);

	const uint32_t type = info & VMCS_INTR_T_MASK;
	const uint8_t vec = info & 0xff;

	switch (type) {
	case VMCS_INTR_T_HWINTR:
	case VMCS_INTR_T_NMI:
	case VMCS_INTR_T_HWEXCEPTION:
	case VMCS_INTR_T_SWINTR:
	case VMCS_INTR_T_PRIV_SWEXCEPTION:
	case VMCS_INTR_T_SWEXCEPTION:
		break;
	default:
		panic("unexpected event type 0x%03x", type);
	}

	uint64_t intinfo = VM_INTINFO_VALID | type | vec;
	if (info & VMCS_IDT_VEC_ERRCODE_VALID) {
		const uint32_t errcode = vmcs_read(VMCS_IDT_VECTORING_ERROR);
		intinfo |= (uint64_t)errcode << 32;
	}

	return (intinfo);
}
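/*
 * For reference, the event-information format shared by the VMCS fields and
 * the bhyve intinfo representation (asserted equivalent by the CTASSERTs
 * above) is roughly:
 *
 *	bits 7:0	vector
 *	bits 10:8	event type (HWINTR, NMI, HWEXCEPTION, ...)
 *	bit 11		error code valid
 *	bit 31		valid
 *	bits 63:32	error code (bhyve intinfo only)
 */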
static void
vmx_inject_intinfo(uint64_t info)
{
	ASSERT(VM_INTINFO_PENDING(info));
	ASSERT0(info & VM_INTINFO_MASK_RSVD);

	/*
	 * The bhyve format matches that of the VMCS, which is ensured by the
	 * CTASSERTs above.
	 */
	uint32_t inject = info;
	switch (VM_INTINFO_VECTOR(info)) {
	case IDT_BP:
	case IDT_OF:
		/*
		 * VT-x requires #BP and #OF to be injected as software
		 * exceptions.
		 */
		inject &= ~VMCS_INTR_T_MASK;
		inject |= VMCS_INTR_T_SWEXCEPTION;
		break;
	default:
		break;
	}

	if (VM_INTINFO_HAS_ERRCODE(info)) {
		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
		    VM_INTINFO_ERRCODE(info));
	}
	vmcs_write(VMCS_ENTRY_INTR_INFO, inject);
}

#define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/*
	 * Inject the virtual NMI.  The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

/*
 * Inject exceptions, NMIs, and ExtINTs.
 *
 * The logic behind these is complicated and may involve mutex contention, so
 * the injection is performed without the protection of host CPU interrupts
 * being disabled.  This means a racing notification could be "lost",
 * necessitating a later call to vmx_inject_recheck() to close that window
 * of opportunity.
 */
static enum event_inject_state
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
{
	uint64_t entryinfo;
	uint32_t gi, info;
	int vector;
	enum event_inject_state state;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	state = EIS_CAN_INJECT;

	/* Clear any interrupt blocking if the guest %rip has changed */
	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
		gi &= ~HWINTR_BLOCKING;
		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
	}

	/*
	 * It could be that an interrupt is already pending for injection from
	 * the VMCS.  This would be the case if the vCPU exited for conditions
	 * such as an AST before a vm-entry delivered the injection.
	 */
	if ((info & VMCS_INTR_VALID) != 0) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		vmx_inject_intinfo(entryinfo);
		state = EIS_EV_INJECTED;
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then
		 * inject it directly here; otherwise enable "NMI window
		 * exiting" to inject it as soon as we can.
		 *
		 * According to the Intel manual, some CPUs do not allow NMI
		 * injection when STI_BLOCKING is active.  That check is
		 * enforced here, regardless of CPU capability.  If running on a
		 * CPU without such a restriction it will immediately exit and
		 * the NMI will be injected in the "NMI window exiting" handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if (state == EIS_CAN_INJECT) {
				vmx_inject_nmi(vmx, vcpu);
				state = EIS_EV_INJECTED;
			} else {
				return (state | EIS_REQ_EXIT);
			}
		} else {
			vmx_set_nmi_window_exiting(vmx, vcpu);
		}
	}

	if (vm_extint_pending(vmx->vm, vcpu)) {
		if (state != EIS_CAN_INJECT) {
			return (state | EIS_REQ_EXIT);
		}
		if ((gi & HWINTR_BLOCKING) != 0 ||
		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		/* Inject the interrupt */
		vmcs_write(VMCS_ENTRY_INTR_INFO,
		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);
		state = EIS_EV_INJECTED;
	}

	return (state);
}
/*
 * Inject any interrupts pending on the vLAPIC.
 *
 * This is done with host CPU interrupts disabled so notification IPIs, either
 * from the standard vCPU notification or APICv posted interrupts, will be
 * queued on the host APIC and recognized when entering VMX context.
 */
static enum event_inject_state
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}

	/*
	 * From the Intel SDM, Volume 3, Section "Maskable
	 * Hardware Interrupts":
	 * - maskable interrupt vectors [16,255] can be delivered
	 *   through the local APIC.
	 */
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		uint16_t status_new = (status_old & 0xff00) | vector;

		/*
		 * The APICv state will have been synced into the vLAPIC
		 * as part of vlapic_pending_intr().  Prepare the VMCS
		 * for the to-be-injected pending interrupt.
		 */
		if (status_new > status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
		}

		/*
		 * Ensure VMCS state regarding EOI traps is kept in sync
		 * with the TMRs in the vlapic.
		 */
		vmx_apicv_sync_tmr(vlapic);

		/*
		 * The rest of the injection process for injecting the
		 * interrupt(s) is handled by APICv.  It does not preclude other
		 * event injection from occurring.
		 */
		return (EIS_CAN_INJECT);
	}

	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/* Does guest interruptibility block injection? */
	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
		return (EIS_GI_BLOCK);
	}

	/* Inject the interrupt */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	return (EIS_EV_INJECTED);
}

/*
 * Re-check for events to be injected.
 *
 * Once host CPU interrupts are disabled, check for the presence of any events
 * which require injection processing.  If an exit is required upon injection,
 * or once the guest becomes interruptible, that will be configured too.
 */
static bool
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
{
	if (state == EIS_CAN_INJECT) {
		if (vm_nmi_pending(vmx->vm, vcpu) &&
		    !vmx_nmi_window_exiting(vmx, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(vmx->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptible again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			vmx_set_int_window_exiting(vmx, vcpu);
		}
	}
	return (false);
}
/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS.  An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault.  In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
	if (!limits->xsave_enabled ||
	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}
	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}
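/*
 * The 'ident' values in the accessors above follow the standard x86 register
 * encoding (0 = %rax ... 15 = %r15) used by, e.g., the exit qualification for
 * CR accesses.  %rsp (4) is special-cased through the VMCS because the guest
 * stack pointer lives in guest-state fields rather than in struct vmxctx.
 */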
static void
vmx_sync_efer_state(struct vmx *vmx, int vcpu, uint64_t efer)
{
	uint64_t ctrl;

	/*
	 * If the "load EFER" VM-entry control is 1 (which we require) then the
	 * value of EFER.LMA must be identical to the "IA-32e mode guest" bit
	 * in the VM-entry control.
	 */
	ctrl = vmcs_read(VMCS_ENTRY_CTLS);
	if ((efer & EFER_LMA) != 0) {
		ctrl |= VM_ENTRY_GUEST_LMA;
	} else {
		ctrl &= ~VM_ENTRY_GUEST_LMA;
	}
	vmcs_write(VMCS_ENTRY_CTLS, ctrl);
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;

	const uint64_t old = vmcs_read(VMCS_GUEST_CR0);
	const uint64_t diff = crval ^ old;
	/* Flush the TLB if the paging or write-protect bits are changing */
	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
		vmx_invvpid(vmx, vcpu, 1);
	}

	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer;

		/* Keep EFER.LMA properly updated if paging is enabled */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			vmx_sync_efer_state(vmx, vcpu, efer);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}
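/*
 * For the CR-access exit qualification decoded above and below: bits 3:0 hold
 * the control register number, bits 5:4 the access type (0 = mov to CR,
 * 1 = mov from CR), and bits 11:8 the general-purpose register operand, using
 * the same encoding as vmx_get_guest_reg()/vmx_set_guest_reg().
 */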
static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_read(VMCS_GUEST_CR3);
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
    uint64_t gla)
{
	struct vm_guest_paging paging;
	uint32_t csar;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = gla;
	vmx_paging_info(&paging);

	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.mmio_emul.cs_base = 0;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	}

	vie_init_mmio(vie, NULL, 0, &paging, gpa);
}
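/*
 * The I/O-instruction exit qualification decoded below packs, roughly:
 * bits 2:0 as (access size - 1) in bytes, bit 3 as direction (1 = in),
 * bit 4 as string operation, bit 5 as REP prefix, and bits 31:16 as the
 * port number.
 */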
static void
vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
    uint32_t eax)
{
	struct vm_guest_paging paging;
	struct vm_inout *inout;

	inout = &vmexit->u.inout;

	inout->bytes = (qual & 0x7) + 1;
	inout->flags = 0;
	inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
	inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
	inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
	inout->port = (uint16_t)(qual >> 16);
	inout->eax = eax;
	if (inout->flags & INOUT_STR) {
		uint64_t inst_info;

		inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);

		/*
		 * According to the SDM, bits 9:7 encode the address size of the
		 * ins/outs operation, but only values 0/1/2 are expected,
		 * corresponding to 16/32/64 bit sizes.
		 */
		inout->addrsize = 2 << BITX(inst_info, 9, 7);
		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
		    inout->addrsize == 8);

		if (inout->flags & INOUT_IN) {
			/*
			 * The bits describing the segment in INSTRUCTION_INFO
			 * are not defined for ins, leaving it to system
			 * software to assume %es (encoded as 0)
			 */
			inout->segment = 0;
		} else {
			/*
			 * Bits 15-17 encode the segment for OUTS.
			 * This value follows the standard x86 segment order.
			 */
			inout->segment = (inst_info >> 15) & 0x7;
		}
	}

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmx_paging_info(&paging);
	vie_init_inout(vie, inout, vmexit->inst_length, &paging);

	/* The in/out emulation will handle advancing %rip */
	vmexit->inst_length = 0;
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = PROT_EXEC;
	else
		fault_type = PROT_READ;

	return (fault_type);
}

static bool
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (false);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (false);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (false);
	}

	return (true);
}
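/*
 * For reference, the EPT-violation exit qualification bits tested above are
 * roughly: bit 0 = data read, bit 1 = data write, bit 2 = instruction fetch,
 * bit 7 = guest-linear address valid, and bit 8 = the access occurred during
 * translation of a linear address (rather than, e.g., a page-walk access).
 */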
1955 */ 1956 if (x2apic_virtualization(vmx, vcpuid) && 1957 offset == APIC_OFFSET_SELF_IPI) { 1958 const uint32_t *apic_regs = 1959 (uint32_t *)(vlapic->apic_page); 1960 const uint32_t vector = 1961 apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1962 1963 vlapic_self_ipi_handler(vlapic, vector); 1964 return (HANDLED); 1965 } else 1966 return (UNHANDLED); 1967 } 1968 1969 switch (offset) { 1970 case APIC_OFFSET_ID: 1971 vlapic_id_write_handler(vlapic); 1972 break; 1973 case APIC_OFFSET_LDR: 1974 vlapic_ldr_write_handler(vlapic); 1975 break; 1976 case APIC_OFFSET_DFR: 1977 vlapic_dfr_write_handler(vlapic); 1978 break; 1979 case APIC_OFFSET_SVR: 1980 vlapic_svr_write_handler(vlapic); 1981 break; 1982 case APIC_OFFSET_ESR: 1983 vlapic_esr_write_handler(vlapic); 1984 break; 1985 case APIC_OFFSET_ICR_LOW: 1986 vlapic_icrlo_write_handler(vlapic); 1987 break; 1988 case APIC_OFFSET_CMCI_LVT: 1989 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 1990 vlapic_lvt_write_handler(vlapic, offset); 1991 break; 1992 case APIC_OFFSET_TIMER_ICR: 1993 vlapic_icrtmr_write_handler(vlapic); 1994 break; 1995 case APIC_OFFSET_TIMER_DCR: 1996 vlapic_dcr_write_handler(vlapic); 1997 break; 1998 default: 1999 return (UNHANDLED); 2000 } 2001 return (HANDLED); 2002 } 2003 2004 static bool 2005 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 2006 { 2007 2008 if (apic_access_virtualization(vmx, vcpuid) && 2009 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2010 return (true); 2011 else 2012 return (false); 2013 } 2014 2015 static int 2016 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2017 { 2018 uint64_t qual; 2019 int access_type, offset, allowed; 2020 struct vie *vie; 2021 2022 if (!apic_access_virtualization(vmx, vcpuid)) 2023 return (UNHANDLED); 2024 2025 qual = vmexit->u.vmx.exit_qualification; 2026 access_type = APIC_ACCESS_TYPE(qual); 2027 offset = APIC_ACCESS_OFFSET(qual); 2028 2029 allowed = 0; 2030 if (access_type == 0) { 2031 /* 2032 * Read data access to the following registers is expected. 2033 */ 2034 switch (offset) { 2035 case APIC_OFFSET_APR: 2036 case APIC_OFFSET_PPR: 2037 case APIC_OFFSET_RRR: 2038 case APIC_OFFSET_CMCI_LVT: 2039 case APIC_OFFSET_TIMER_CCR: 2040 allowed = 1; 2041 break; 2042 default: 2043 break; 2044 } 2045 } else if (access_type == 1) { 2046 /* 2047 * Write data access to the following registers is expected. 2048 */ 2049 switch (offset) { 2050 case APIC_OFFSET_VER: 2051 case APIC_OFFSET_APR: 2052 case APIC_OFFSET_PPR: 2053 case APIC_OFFSET_RRR: 2054 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2055 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2056 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2057 case APIC_OFFSET_CMCI_LVT: 2058 case APIC_OFFSET_TIMER_CCR: 2059 allowed = 1; 2060 break; 2061 default: 2062 break; 2063 } 2064 } 2065 2066 if (allowed) { 2067 vie = vm_vie_ctx(vmx->vm, vcpuid); 2068 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2069 VIE_INVALID_GLA); 2070 } 2071 2072 /* 2073 * Regardless of whether the APIC-access is allowed this handler 2074 * always returns UNHANDLED: 2075 * - if the access is allowed then it is handled by emulating the 2076 * instruction that caused the VM-exit (outside the critical section) 2077 * - if the access is not allowed then it will be converted to an 2078 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 
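 *
 * (In the allowed case, the vie state initialized above via
 * vmexit_mmio_emul() is what that later emulation consumes.)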
2079 */ 2080 return (UNHANDLED); 2081 } 2082 2083 static enum task_switch_reason 2084 vmx_task_switch_reason(uint64_t qual) 2085 { 2086 int reason; 2087 2088 reason = (qual >> 30) & 0x3; 2089 switch (reason) { 2090 case 0: 2091 return (TSR_CALL); 2092 case 1: 2093 return (TSR_IRET); 2094 case 2: 2095 return (TSR_JMP); 2096 case 3: 2097 return (TSR_IDT_GATE); 2098 default: 2099 panic("%s: invalid reason %d", __func__, reason); 2100 } 2101 } 2102 2103 static int 2104 vmx_handle_msr(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit, 2105 bool is_wrmsr) 2106 { 2107 struct vmxctx *vmxctx = &vmx->ctx[vcpuid]; 2108 const uint32_t ecx = vmxctx->guest_rcx; 2109 vm_msr_result_t res; 2110 uint64_t val = 0; 2111 2112 if (is_wrmsr) { 2113 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1); 2114 val = vmxctx->guest_rdx << 32 | (uint32_t)vmxctx->guest_rax; 2115 2116 if (vlapic_owned_msr(ecx)) { 2117 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2118 2119 res = vlapic_wrmsr(vlapic, ecx, val); 2120 } else { 2121 res = vmx_wrmsr(vmx, vcpuid, ecx, val); 2122 } 2123 } else { 2124 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1); 2125 2126 if (vlapic_owned_msr(ecx)) { 2127 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2128 2129 res = vlapic_rdmsr(vlapic, ecx, &val); 2130 } else { 2131 res = vmx_rdmsr(vmx, vcpuid, ecx, &val); 2132 } 2133 } 2134 2135 switch (res) { 2136 case VMR_OK: 2137 /* Store rdmsr result in the appropriate registers */ 2138 if (!is_wrmsr) { 2139 vmxctx->guest_rax = (uint32_t)val; 2140 vmxctx->guest_rdx = val >> 32; 2141 } 2142 return (HANDLED); 2143 case VMR_GP: 2144 vm_inject_gp(vmx->vm, vcpuid); 2145 return (HANDLED); 2146 case VMR_UNHANLDED: 2147 vmexit->exitcode = is_wrmsr ? 2148 VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR; 2149 vmexit->u.msr.code = ecx; 2150 vmexit->u.msr.wval = val; 2151 return (UNHANDLED); 2152 default: 2153 panic("unexpected msr result %u\n", res); 2154 } 2155 } 2156 2157 static int 2158 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2159 { 2160 int error, errcode, errcode_valid, handled; 2161 struct vmxctx *vmxctx; 2162 struct vie *vie; 2163 struct vlapic *vlapic; 2164 struct vm_task_switch *ts; 2165 uint32_t idtvec_info, intr_info; 2166 uint32_t intr_type, intr_vec, reason; 2167 uint64_t qual, gpa; 2168 2169 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2170 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2171 2172 handled = UNHANDLED; 2173 vmxctx = &vmx->ctx[vcpu]; 2174 2175 qual = vmexit->u.vmx.exit_qualification; 2176 reason = vmexit->u.vmx.exit_reason; 2177 vmexit->exitcode = VM_EXITCODE_BOGUS; 2178 2179 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2180 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2181 2182 /* 2183 * VM-entry failures during or after loading guest state. 2184 * 2185 * These VM-exits are uncommon but must be handled specially 2186 * as most VM-exit fields are not populated as usual. 2187 */ 2188 if (reason == EXIT_REASON_MCE_DURING_ENTRY) { 2189 vmm_call_trap(T_MCE); 2190 return (1); 2191 } 2192 2193 /* 2194 * VM exits that can be triggered during event delivery need to 2195 * be handled specially by re-injecting the event if the IDT 2196 * vectoring information field's valid bit is set. 2197 * 2198 * See "Information for VM Exits During Event Delivery" in Intel SDM 2199 * for details. 
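 *
 * A concrete example: if an EPT violation is taken while the guest
 * is part-way through delivering a page fault via its IDT, the
 * pending #PF is described by the IDT-vectoring info field and must
 * be queued again so that it is delivered on the next VM-entry.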
2200 */ 2201 idtvec_info = vmcs_read(VMCS_IDT_VECTORING_INFO); 2202 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2203 /* Record exit intinfo */ 2204 VERIFY0(vm_exit_intinfo(vmx->vm, vcpu, 2205 vmx_idtvec_to_intinfo(idtvec_info))); 2206 2207 /* 2208 * If 'virtual NMIs' are being used and the VM-exit 2209 * happened while injecting an NMI during the previous 2210 * VM-entry, then clear "blocking by NMI" in the 2211 * Guest Interruptibility-State so the NMI can be 2212 * reinjected on the subsequent VM-entry. 2213 * 2214 * However, if the NMI was being delivered through a task 2215 * gate, then the new task must start execution with NMIs 2216 * blocked so don't clear NMI blocking in this case. 2217 */ 2218 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2219 if (intr_type == VMCS_INTR_T_NMI) { 2220 if (reason != EXIT_REASON_TASK_SWITCH) 2221 vmx_clear_nmi_blocking(vmx, vcpu); 2222 else 2223 vmx_assert_nmi_blocking(vmx, vcpu); 2224 } 2225 2226 /* 2227 * Update VM-entry instruction length if the event being 2228 * delivered was a software interrupt or software exception. 2229 */ 2230 if (intr_type == VMCS_INTR_T_SWINTR || 2231 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2232 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2233 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2234 } 2235 } 2236 2237 switch (reason) { 2238 case EXIT_REASON_TRIPLE_FAULT: 2239 (void) vm_suspend(vmx->vm, VM_SUSPEND_TRIPLEFAULT); 2240 handled = HANDLED; 2241 break; 2242 case EXIT_REASON_TASK_SWITCH: 2243 ts = &vmexit->u.task_switch; 2244 ts->tsssel = qual & 0xffff; 2245 ts->reason = vmx_task_switch_reason(qual); 2246 ts->ext = 0; 2247 ts->errcode_valid = 0; 2248 vmx_paging_info(&ts->paging); 2249 /* 2250 * If the task switch was due to a CALL, JMP, IRET, software 2251 * interrupt (INT n) or software exception (INT3, INTO), 2252 * then the saved %rip references the instruction that caused 2253 * the task switch. The instruction length field in the VMCS 2254 * is valid in this case. 2255 * 2256 * In all other cases (e.g., NMI, hardware exception) the 2257 * saved %rip is one that would have been saved in the old TSS 2258 * had the task switch completed normally so the instruction 2259 * length field is not needed in this case and is explicitly 2260 * set to 0. 
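 *
 * (An NMI arriving through a task gate is an example of the latter
 * case: the reported %rip is a resume point rather than the address
 * of a task-switching instruction.)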
2261 */ 2262 if (ts->reason == TSR_IDT_GATE) { 2263 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2264 ("invalid idtvec_info %x for IDT task switch", 2265 idtvec_info)); 2266 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2267 if (intr_type != VMCS_INTR_T_SWINTR && 2268 intr_type != VMCS_INTR_T_SWEXCEPTION && 2269 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2270 /* Task switch triggered by external event */ 2271 ts->ext = 1; 2272 vmexit->inst_length = 0; 2273 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2274 ts->errcode_valid = 1; 2275 ts->errcode = 2276 vmcs_read(VMCS_IDT_VECTORING_ERROR); 2277 } 2278 } 2279 } 2280 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2281 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2282 break; 2283 case EXIT_REASON_CR_ACCESS: 2284 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2285 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2286 switch (qual & 0xf) { 2287 case 0: 2288 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2289 break; 2290 case 4: 2291 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2292 break; 2293 case 8: 2294 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2295 break; 2296 } 2297 break; 2298 case EXIT_REASON_RDMSR: 2299 case EXIT_REASON_WRMSR: 2300 handled = vmx_handle_msr(vmx, vcpu, vmexit, 2301 reason == EXIT_REASON_WRMSR); 2302 break; 2303 case EXIT_REASON_HLT: 2304 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2305 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2306 vmexit->exitcode = VM_EXITCODE_HLT; 2307 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2308 break; 2309 case EXIT_REASON_MTF: 2310 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2311 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2312 vmexit->exitcode = VM_EXITCODE_MTRAP; 2313 vmexit->inst_length = 0; 2314 break; 2315 case EXIT_REASON_PAUSE: 2316 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2317 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2318 vmexit->exitcode = VM_EXITCODE_PAUSE; 2319 break; 2320 case EXIT_REASON_INTR_WINDOW: 2321 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2322 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2323 vmx_clear_int_window_exiting(vmx, vcpu); 2324 return (1); 2325 case EXIT_REASON_EXT_INTR: 2326 /* 2327 * External interrupts serve only to cause VM exits and allow 2328 * the host interrupt handler to run. 2329 * 2330 * If this external interrupt triggers a virtual interrupt 2331 * to a VM, then that state will be recorded by the 2332 * host interrupt handler in the VM's softc. We will inject 2333 * this virtual interrupt during the subsequent VM enter. 2334 */ 2335 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2336 SDT_PROBE4(vmm, vmx, exit, interrupt, 2337 vmx, vcpu, vmexit, intr_info); 2338 2339 /* 2340 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2341 * This appears to be a bug in VMware Fusion? 2342 */ 2343 if (!(intr_info & VMCS_INTR_VALID)) 2344 return (1); 2345 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2346 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2347 ("VM exit interruption info invalid: %x", intr_info)); 2348 vmx_trigger_hostintr(intr_info & 0xff); 2349 2350 /* 2351 * This is special. We want to treat this as an 'handled' 2352 * VM-exit but not increment the instruction pointer. 
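 *
 * (Returning directly, instead of setting 'handled' and falling
 * through, skips the common epilogue below which advances %rip by
 * the instruction length.)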
2353 */ 2354 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2355 return (1); 2356 case EXIT_REASON_NMI_WINDOW: 2357 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2358 /* Exit to allow the pending virtual NMI to be injected */ 2359 if (vm_nmi_pending(vmx->vm, vcpu)) 2360 vmx_inject_nmi(vmx, vcpu); 2361 vmx_clear_nmi_window_exiting(vmx, vcpu); 2362 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2363 return (1); 2364 case EXIT_REASON_INOUT: 2365 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2366 vie = vm_vie_ctx(vmx->vm, vcpu); 2367 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2368 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2369 break; 2370 case EXIT_REASON_CPUID: 2371 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2372 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2373 vcpu_emulate_cpuid(vmx->vm, vcpu, 2374 (uint64_t *)&vmxctx->guest_rax, 2375 (uint64_t *)&vmxctx->guest_rbx, 2376 (uint64_t *)&vmxctx->guest_rcx, 2377 (uint64_t *)&vmxctx->guest_rdx); 2378 handled = HANDLED; 2379 break; 2380 case EXIT_REASON_EXCEPTION: 2381 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2382 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2383 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2384 ("VM exit interruption info invalid: %x", intr_info)); 2385 2386 intr_vec = intr_info & 0xff; 2387 intr_type = intr_info & VMCS_INTR_T_MASK; 2388 2389 /* 2390 * If Virtual NMIs control is 1 and the VM-exit is due to a 2391 * fault encountered during the execution of IRET then we must 2392 * restore the state of "virtual-NMI blocking" before resuming 2393 * the guest. 2394 * 2395 * See "Resuming Guest Software after Handling an Exception". 2396 * See "Information for VM Exits Due to Vectored Events". 2397 */ 2398 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2399 (intr_vec != IDT_DF) && 2400 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2401 vmx_restore_nmi_blocking(vmx, vcpu); 2402 2403 /* 2404 * The NMI has already been handled in vmx_exit_handle_nmi(). 2405 */ 2406 if (intr_type == VMCS_INTR_T_NMI) 2407 return (1); 2408 2409 /* 2410 * Call the machine check handler by hand. Also don't reflect 2411 * the machine check back into the guest. 2412 */ 2413 if (intr_vec == IDT_MC) { 2414 vmm_call_trap(T_MCE); 2415 return (1); 2416 } 2417 2418 /* 2419 * If the hypervisor has requested user exits for 2420 * debug exceptions, bounce them out to userland. 2421 */ 2422 if (intr_type == VMCS_INTR_T_SWEXCEPTION && 2423 intr_vec == IDT_BP && 2424 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2425 vmexit->exitcode = VM_EXITCODE_BPT; 2426 vmexit->u.bpt.inst_length = vmexit->inst_length; 2427 vmexit->inst_length = 0; 2428 break; 2429 } 2430 2431 if (intr_vec == IDT_PF) { 2432 vmxctx->guest_cr2 = qual; 2433 } 2434 2435 /* 2436 * Software exceptions exhibit trap-like behavior. This in 2437 * turn requires populating the VM-entry instruction length 2438 * so that the %rip in the trap frame is past the INT3/INTO 2439 * instruction. 
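 *
 * For example, INT3 is the single byte 0xcc, so an instruction
 * length of 1 is written below, leaving the trap frame %rip just
 * past the breakpoint.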
2440 */ 2441 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2442 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2443 2444 /* Reflect all other exceptions back into the guest */ 2445 errcode_valid = errcode = 0; 2446 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2447 errcode_valid = 1; 2448 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2449 } 2450 SDT_PROBE5(vmm, vmx, exit, exception, 2451 vmx, vcpu, vmexit, intr_vec, errcode); 2452 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2453 errcode_valid, errcode, 0); 2454 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2455 __func__, error)); 2456 return (1); 2457 2458 case EXIT_REASON_EPT_FAULT: 2459 /* 2460 * If 'gpa' lies within the address space allocated to 2461 * memory then this must be a nested page fault otherwise 2462 * this must be an instruction that accesses MMIO space. 2463 */ 2464 gpa = vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS); 2465 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2466 apic_access_fault(vmx, vcpu, gpa)) { 2467 vmexit->exitcode = VM_EXITCODE_PAGING; 2468 vmexit->inst_length = 0; 2469 vmexit->u.paging.gpa = gpa; 2470 vmexit->u.paging.fault_type = ept_fault_type(qual); 2471 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2472 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2473 vmx, vcpu, vmexit, gpa, qual); 2474 } else if (ept_emulation_fault(qual)) { 2475 vie = vm_vie_ctx(vmx->vm, vcpu); 2476 vmexit_mmio_emul(vmexit, vie, gpa, 2477 vmcs_read(VMCS_GUEST_LINEAR_ADDRESS)); 2478 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2479 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2480 vmx, vcpu, vmexit, gpa); 2481 } 2482 /* 2483 * If Virtual NMIs control is 1 and the VM-exit is due to an 2484 * EPT fault during the execution of IRET then we must restore 2485 * the state of "virtual-NMI blocking" before resuming. 2486 * 2487 * See description of "NMI unblocking due to IRET" in 2488 * "Exit Qualification for EPT Violations". 2489 */ 2490 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2491 (qual & EXIT_QUAL_NMIUDTI) != 0) 2492 vmx_restore_nmi_blocking(vmx, vcpu); 2493 break; 2494 case EXIT_REASON_VIRTUALIZED_EOI: 2495 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2496 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2497 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2498 vmexit->inst_length = 0; /* trap-like */ 2499 break; 2500 case EXIT_REASON_APIC_ACCESS: 2501 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2502 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2503 break; 2504 case EXIT_REASON_APIC_WRITE: 2505 /* 2506 * APIC-write VM exit is trap-like so the %rip is already 2507 * pointing to the next instruction. 
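 *
 * (This contrasts with fault-like exits, where the faulting
 * instruction is re-executed once the exit has been handled.)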
2508 */ 2509 vmexit->inst_length = 0; 2510 vlapic = vm_lapic(vmx->vm, vcpu); 2511 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2512 vmx, vcpu, vmexit, vlapic); 2513 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2514 break; 2515 case EXIT_REASON_XSETBV: 2516 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2517 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2518 break; 2519 case EXIT_REASON_MONITOR: 2520 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2521 vmexit->exitcode = VM_EXITCODE_MONITOR; 2522 break; 2523 case EXIT_REASON_MWAIT: 2524 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2525 vmexit->exitcode = VM_EXITCODE_MWAIT; 2526 break; 2527 case EXIT_REASON_TPR: 2528 vlapic = vm_lapic(vmx->vm, vcpu); 2529 vlapic_sync_tpr(vlapic); 2530 vmexit->inst_length = 0; 2531 handled = HANDLED; 2532 break; 2533 case EXIT_REASON_VMCALL: 2534 case EXIT_REASON_VMCLEAR: 2535 case EXIT_REASON_VMLAUNCH: 2536 case EXIT_REASON_VMPTRLD: 2537 case EXIT_REASON_VMPTRST: 2538 case EXIT_REASON_VMREAD: 2539 case EXIT_REASON_VMRESUME: 2540 case EXIT_REASON_VMWRITE: 2541 case EXIT_REASON_VMXOFF: 2542 case EXIT_REASON_VMXON: 2543 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2544 vmexit->exitcode = VM_EXITCODE_VMINSN; 2545 break; 2546 case EXIT_REASON_INVD: 2547 case EXIT_REASON_WBINVD: 2548 /* ignore exit */ 2549 handled = HANDLED; 2550 break; 2551 default: 2552 SDT_PROBE4(vmm, vmx, exit, unknown, 2553 vmx, vcpu, vmexit, reason); 2554 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2555 break; 2556 } 2557 2558 if (handled) { 2559 /* 2560 * It is possible that control is returned to userland 2561 * even though we were able to handle the VM exit in the 2562 * kernel. 2563 * 2564 * In such a case we want to make sure that the userland 2565 * restarts guest execution at the instruction *after* 2566 * the one we just processed. Therefore we update the 2567 * guest rip in the VMCS and in 'vmexit'. 2568 */ 2569 vmexit->rip += vmexit->inst_length; 2570 vmexit->inst_length = 0; 2571 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2572 } else { 2573 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2574 /* 2575 * If this VM exit was not claimed by anybody then 2576 * treat it as a generic VMX exit. 2577 */ 2578 vmexit->exitcode = VM_EXITCODE_VMX; 2579 vmexit->u.vmx.status = VM_SUCCESS; 2580 vmexit->u.vmx.inst_type = 0; 2581 vmexit->u.vmx.inst_error = 0; 2582 } else { 2583 /* 2584 * The exitcode and collateral have been populated. 2585 * The VM exit will be processed further in userland. 
2586 */ 2587 } 2588 } 2589 2590 SDT_PROBE4(vmm, vmx, exit, return, 2591 vmx, vcpu, vmexit, handled); 2592 return (handled); 2593 } 2594 2595 static void 2596 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2597 { 2598 2599 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2600 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2601 vmxctx->inst_fail_status)); 2602 2603 vmexit->inst_length = 0; 2604 vmexit->exitcode = VM_EXITCODE_VMX; 2605 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2606 vmexit->u.vmx.inst_error = vmcs_read(VMCS_INSTRUCTION_ERROR); 2607 vmexit->u.vmx.exit_reason = ~0; 2608 vmexit->u.vmx.exit_qualification = ~0; 2609 2610 switch (rc) { 2611 case VMX_VMRESUME_ERROR: 2612 case VMX_VMLAUNCH_ERROR: 2613 case VMX_INVEPT_ERROR: 2614 case VMX_VMWRITE_ERROR: 2615 vmexit->u.vmx.inst_type = rc; 2616 break; 2617 default: 2618 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2619 } 2620 } 2621 2622 /* 2623 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2624 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2625 * sufficient to simply vector to the NMI handler via a software interrupt. 2626 * However, this must be done before maskable interrupts are enabled 2627 * otherwise the "iret" issued by an interrupt handler will incorrectly 2628 * clear NMI blocking. 2629 */ 2630 static __inline void 2631 vmx_exit_handle_possible_nmi(struct vm_exit *vmexit) 2632 { 2633 ASSERT(!interrupts_enabled()); 2634 2635 if (vmexit->u.vmx.exit_reason == EXIT_REASON_EXCEPTION) { 2636 uint32_t intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2637 ASSERT(intr_info & VMCS_INTR_VALID); 2638 2639 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2640 ASSERT3U(intr_info & 0xff, ==, IDT_NMI); 2641 vmm_call_trap(T_NMIFLT); 2642 } 2643 } 2644 } 2645 2646 static __inline void 2647 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2648 { 2649 uint64_t rflags; 2650 2651 /* Save host control debug registers. */ 2652 vmxctx->host_dr7 = rdr7(); 2653 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2654 2655 /* 2656 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2657 * exceptions in the host based on the guest DRx values. The 2658 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2659 */ 2660 load_dr7(0); 2661 wrmsr(MSR_DEBUGCTLMSR, 0); 2662 2663 /* 2664 * Disable single stepping the kernel to avoid corrupting the 2665 * guest DR6. A debugger might still be able to corrupt the 2666 * guest DR6 by setting a breakpoint after this point and then 2667 * single stepping. 2668 */ 2669 rflags = read_rflags(); 2670 vmxctx->host_tf = rflags & PSL_T; 2671 write_rflags(rflags & ~PSL_T); 2672 2673 /* Save host debug registers. */ 2674 vmxctx->host_dr0 = rdr0(); 2675 vmxctx->host_dr1 = rdr1(); 2676 vmxctx->host_dr2 = rdr2(); 2677 vmxctx->host_dr3 = rdr3(); 2678 vmxctx->host_dr6 = rdr6(); 2679 2680 /* Restore guest debug registers. */ 2681 load_dr0(vmxctx->guest_dr0); 2682 load_dr1(vmxctx->guest_dr1); 2683 load_dr2(vmxctx->guest_dr2); 2684 load_dr3(vmxctx->guest_dr3); 2685 load_dr6(vmxctx->guest_dr6); 2686 } 2687 2688 static __inline void 2689 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2690 { 2691 2692 /* Save guest debug registers. */ 2693 vmxctx->guest_dr0 = rdr0(); 2694 vmxctx->guest_dr1 = rdr1(); 2695 vmxctx->guest_dr2 = rdr2(); 2696 vmxctx->guest_dr3 = rdr3(); 2697 vmxctx->guest_dr6 = rdr6(); 2698 2699 /* 2700 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2701 * PSL_T last. 
2702 */ 2703 load_dr0(vmxctx->host_dr0); 2704 load_dr1(vmxctx->host_dr1); 2705 load_dr2(vmxctx->host_dr2); 2706 load_dr3(vmxctx->host_dr3); 2707 load_dr6(vmxctx->host_dr6); 2708 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2709 load_dr7(vmxctx->host_dr7); 2710 write_rflags(read_rflags() | vmxctx->host_tf); 2711 } 2712 2713 static int 2714 vmx_run(void *arg, int vcpu, uint64_t rip) 2715 { 2716 int rc, handled, launched; 2717 struct vmx *vmx; 2718 struct vm *vm; 2719 struct vmxctx *vmxctx; 2720 uintptr_t vmcs_pa; 2721 struct vm_exit *vmexit; 2722 struct vlapic *vlapic; 2723 uint32_t exit_reason; 2724 bool tpr_shadow_active; 2725 vm_client_t *vmc; 2726 2727 vmx = arg; 2728 vm = vmx->vm; 2729 vmcs_pa = vmx->vmcs_pa[vcpu]; 2730 vmxctx = &vmx->ctx[vcpu]; 2731 vlapic = vm_lapic(vm, vcpu); 2732 vmexit = vm_exitinfo(vm, vcpu); 2733 vmc = vm_get_vmclient(vm, vcpu); 2734 launched = 0; 2735 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 2736 !vmx_cap_en(vmx, VMX_CAP_APICV) && 2737 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0; 2738 2739 vmx_msr_guest_enter(vmx, vcpu); 2740 2741 vmcs_load(vmcs_pa); 2742 2743 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2744 vmx->vmcs_state[vcpu] = VS_LOADED; 2745 2746 /* 2747 * XXX 2748 * We do this every time because we may setup the virtual machine 2749 * from a different process than the one that actually runs it. 2750 * 2751 * If the life of a virtual machine was spent entirely in the context 2752 * of a single process we could do this once in vmx_vminit(). 2753 */ 2754 vmcs_write(VMCS_HOST_CR3, rcr3()); 2755 2756 vmcs_write(VMCS_GUEST_RIP, rip); 2757 vmx_set_pcpu_defaults(vmx, vcpu); 2758 do { 2759 enum event_inject_state inject_state; 2760 uint64_t eptgen; 2761 2762 ASSERT3U(vmcs_read(VMCS_GUEST_RIP), ==, rip); 2763 2764 handled = UNHANDLED; 2765 2766 /* 2767 * Perform initial event/exception/interrupt injection before 2768 * host CPU interrupts are disabled. 2769 */ 2770 inject_state = vmx_inject_events(vmx, vcpu, rip); 2771 2772 /* 2773 * Interrupts are disabled from this point on until the 2774 * guest starts executing. This is done for the following 2775 * reasons: 2776 * 2777 * If an AST is asserted on this thread after the check below, 2778 * then the IPI_AST notification will not be lost, because it 2779 * will cause a VM exit due to external interrupt as soon as 2780 * the guest state is loaded. 2781 * 2782 * A posted interrupt after vmx_inject_vlapic() will not be 2783 * "lost" because it will be held pending in the host APIC 2784 * because interrupts are disabled. The pending interrupt will 2785 * be recognized as soon as the guest state is loaded. 2786 * 2787 * The same reasoning applies to the IPI generated by vmspace 2788 * invalidation. 2789 */ 2790 disable_intr(); 2791 2792 /* 2793 * If not precluded by existing events, inject any interrupt 2794 * pending on the vLAPIC. As a lock-less operation, it is safe 2795 * (and prudent) to perform with host CPU interrupts disabled. 2796 */ 2797 if (inject_state == EIS_CAN_INJECT) { 2798 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic); 2799 } 2800 2801 /* 2802 * Check for vCPU bail-out conditions. This must be done after 2803 * vmx_inject_events() to detect a triple-fault condition. 
2804 */ 2805 if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) { 2806 enable_intr(); 2807 break; 2808 } 2809 2810 if (vcpu_run_state_pending(vm, vcpu)) { 2811 enable_intr(); 2812 vm_exit_run_state(vmx->vm, vcpu, rip); 2813 break; 2814 } 2815 2816 /* 2817 * If subsequent activity queued events which require injection 2818 * handling, take another lap to handle them. 2819 */ 2820 if (vmx_inject_recheck(vmx, vcpu, inject_state)) { 2821 enable_intr(); 2822 handled = HANDLED; 2823 continue; 2824 } 2825 2826 if ((rc = smt_acquire()) != 1) { 2827 enable_intr(); 2828 vmexit->rip = rip; 2829 vmexit->inst_length = 0; 2830 if (rc == -1) { 2831 vmexit->exitcode = VM_EXITCODE_HT; 2832 } else { 2833 vmexit->exitcode = VM_EXITCODE_BOGUS; 2834 handled = HANDLED; 2835 } 2836 break; 2837 } 2838 2839 /* 2840 * If this thread has gone off-cpu due to mutex operations 2841 * during vmx_run, the VMCS will have been unloaded, forcing a 2842 * re-VMLAUNCH as opposed to VMRESUME. 2843 */ 2844 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 2845 /* 2846 * Restoration of the GDT limit is taken care of by 2847 * vmx_savectx(). Since the maximum practical index for the 2848 * IDT is 255, restoring its limits from the post-VMX-exit 2849 * default of 0xffff is not a concern. 2850 * 2851 * Only 64-bit hypervisor callers are allowed, which forgoes 2852 * the need to restore any LDT descriptor. Toss an error to 2853 * anyone attempting to break that rule. 2854 */ 2855 if (curproc->p_model != DATAMODEL_LP64) { 2856 smt_release(); 2857 enable_intr(); 2858 bzero(vmexit, sizeof (*vmexit)); 2859 vmexit->rip = rip; 2860 vmexit->exitcode = VM_EXITCODE_VMX; 2861 vmexit->u.vmx.status = VM_FAIL_INVALID; 2862 handled = UNHANDLED; 2863 break; 2864 } 2865 2866 if (tpr_shadow_active) { 2867 vmx_tpr_shadow_enter(vlapic); 2868 } 2869 2870 /* 2871 * Indicate activation of vmspace (EPT) table just prior to VMX 2872 * entry, checking for the necessity of an invept invalidation. 2873 */ 2874 eptgen = vmc_table_enter(vmc); 2875 if (vmx->eptgen[curcpu] != eptgen) { 2876 /* 2877 * VMspace generation does not match what was previously 2878 * used on this host CPU, so all mappings associated 2879 * with this EP4TA must be invalidated. 
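 *
 * (The '1' passed to invept below selects the single-context
 * invalidation type: only mappings tagged with this EP4TA are
 * flushed, as opposed to a type-2 all-context invalidation.)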
2880 */ 2881 invept(1, vmx->eptp); 2882 vmx->eptgen[curcpu] = eptgen; 2883 } 2884 2885 vcpu_ustate_change(vm, vcpu, VU_RUN); 2886 vmx_dr_enter_guest(vmxctx); 2887 2888 /* Perform VMX entry */ 2889 rc = vmx_enter_guest(vmxctx, vmx, launched); 2890 2891 vmx_dr_leave_guest(vmxctx); 2892 vcpu_ustate_change(vm, vcpu, VU_EMU_KERN); 2893 2894 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 2895 smt_release(); 2896 2897 if (tpr_shadow_active) { 2898 vmx_tpr_shadow_exit(vlapic); 2899 } 2900 2901 /* Collect some information for VM exit processing */ 2902 vmexit->rip = rip = vmcs_read(VMCS_GUEST_RIP); 2903 vmexit->inst_length = vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH); 2904 vmexit->u.vmx.exit_reason = exit_reason = 2905 (vmcs_read(VMCS_EXIT_REASON) & BASIC_EXIT_REASON_MASK); 2906 vmexit->u.vmx.exit_qualification = 2907 vmcs_read(VMCS_EXIT_QUALIFICATION); 2908 /* Update 'nextrip' */ 2909 vmx->state[vcpu].nextrip = rip; 2910 2911 if (rc == VMX_GUEST_VMEXIT) { 2912 vmx_exit_handle_possible_nmi(vmexit); 2913 } 2914 enable_intr(); 2915 vmc_table_exit(vmc); 2916 2917 if (rc == VMX_GUEST_VMEXIT) { 2918 handled = vmx_exit_process(vmx, vcpu, vmexit); 2919 } else { 2920 vmx_exit_inst_error(vmxctx, rc, vmexit); 2921 } 2922 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 2923 uint32_t, exit_reason); 2924 rip = vmexit->rip; 2925 } while (handled); 2926 2927 /* If a VM exit has been handled then the exitcode must be BOGUS */ 2928 if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) { 2929 panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit", 2930 vmexit->exitcode); 2931 } 2932 2933 vmcs_clear(vmcs_pa); 2934 vmx_msr_guest_exit(vmx, vcpu); 2935 2936 VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0); 2937 vmx->vmcs_state[vcpu] = VS_NONE; 2938 2939 return (0); 2940 } 2941 2942 static void 2943 vmx_vmcleanup(void *arg) 2944 { 2945 int i; 2946 struct vmx *vmx = arg; 2947 uint16_t maxcpus; 2948 2949 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2950 (void) vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2951 kmem_free(vmx->apic_access_page, PAGESIZE); 2952 } else { 2953 VERIFY3P(vmx->apic_access_page, ==, NULL); 2954 } 2955 2956 vmx_msr_bitmap_destroy(vmx); 2957 2958 maxcpus = vm_get_maxcpus(vmx->vm); 2959 for (i = 0; i < maxcpus; i++) 2960 vpid_free(vmx->state[i].vpid); 2961 2962 kmem_free(vmx, sizeof (*vmx)); 2963 } 2964 2965 /* 2966 * Ensure that the VMCS for this vcpu is loaded. 2967 * Returns true if a VMCS load was required. 
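 * Callers are expected to pair this with a matching call to
 * vmx_vmcs_access_done() once their VMCS reads/writes are complete.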
2968 */ 2969 static bool 2970 vmx_vmcs_access_ensure(struct vmx *vmx, int vcpu) 2971 { 2972 int hostcpu; 2973 2974 if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) { 2975 if (hostcpu != curcpu) { 2976 panic("unexpected vcpu migration %d != %d", 2977 hostcpu, curcpu); 2978 } 2979 /* Earlier logic already took care of the load */ 2980 return (false); 2981 } else { 2982 vmcs_load(vmx->vmcs_pa[vcpu]); 2983 return (true); 2984 } 2985 } 2986 2987 static void 2988 vmx_vmcs_access_done(struct vmx *vmx, int vcpu) 2989 { 2990 int hostcpu; 2991 2992 if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) { 2993 if (hostcpu != curcpu) { 2994 panic("unexpected vcpu migration %d != %d", 2995 hostcpu, curcpu); 2996 } 2997 /* Later logic will take care of the unload */ 2998 } else { 2999 vmcs_clear(vmx->vmcs_pa[vcpu]); 3000 } 3001 } 3002 3003 static uint64_t * 3004 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 3005 { 3006 switch (reg) { 3007 case VM_REG_GUEST_RAX: 3008 return (&vmxctx->guest_rax); 3009 case VM_REG_GUEST_RBX: 3010 return (&vmxctx->guest_rbx); 3011 case VM_REG_GUEST_RCX: 3012 return (&vmxctx->guest_rcx); 3013 case VM_REG_GUEST_RDX: 3014 return (&vmxctx->guest_rdx); 3015 case VM_REG_GUEST_RSI: 3016 return (&vmxctx->guest_rsi); 3017 case VM_REG_GUEST_RDI: 3018 return (&vmxctx->guest_rdi); 3019 case VM_REG_GUEST_RBP: 3020 return (&vmxctx->guest_rbp); 3021 case VM_REG_GUEST_R8: 3022 return (&vmxctx->guest_r8); 3023 case VM_REG_GUEST_R9: 3024 return (&vmxctx->guest_r9); 3025 case VM_REG_GUEST_R10: 3026 return (&vmxctx->guest_r10); 3027 case VM_REG_GUEST_R11: 3028 return (&vmxctx->guest_r11); 3029 case VM_REG_GUEST_R12: 3030 return (&vmxctx->guest_r12); 3031 case VM_REG_GUEST_R13: 3032 return (&vmxctx->guest_r13); 3033 case VM_REG_GUEST_R14: 3034 return (&vmxctx->guest_r14); 3035 case VM_REG_GUEST_R15: 3036 return (&vmxctx->guest_r15); 3037 case VM_REG_GUEST_CR2: 3038 return (&vmxctx->guest_cr2); 3039 case VM_REG_GUEST_DR0: 3040 return (&vmxctx->guest_dr0); 3041 case VM_REG_GUEST_DR1: 3042 return (&vmxctx->guest_dr1); 3043 case VM_REG_GUEST_DR2: 3044 return (&vmxctx->guest_dr2); 3045 case VM_REG_GUEST_DR3: 3046 return (&vmxctx->guest_dr3); 3047 case VM_REG_GUEST_DR6: 3048 return (&vmxctx->guest_dr6); 3049 default: 3050 break; 3051 } 3052 return (NULL); 3053 } 3054 3055 static int 3056 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3057 { 3058 struct vmx *vmx = arg; 3059 uint64_t *regp; 3060 3061 /* VMCS access not required for ctx reads */ 3062 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3063 *retval = *regp; 3064 return (0); 3065 } 3066 3067 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3068 int err = 0; 3069 3070 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3071 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3072 *retval = (gi & HWINTR_BLOCKING) ? 
1 : 0; 3073 } else { 3074 uint32_t encoding; 3075 3076 encoding = vmcs_field_encoding(reg); 3077 switch (encoding) { 3078 case VMCS_GUEST_CR0: 3079 /* Take the shadow bits into account */ 3080 *retval = vmx_unshadow_cr0(vmcs_read(encoding), 3081 vmcs_read(VMCS_CR0_SHADOW)); 3082 break; 3083 case VMCS_GUEST_CR4: 3084 /* Take the shadow bits into account */ 3085 *retval = vmx_unshadow_cr4(vmcs_read(encoding), 3086 vmcs_read(VMCS_CR4_SHADOW)); 3087 break; 3088 case VMCS_INVALID_ENCODING: 3089 err = EINVAL; 3090 break; 3091 default: 3092 *retval = vmcs_read(encoding); 3093 break; 3094 } 3095 } 3096 3097 if (vmcs_loaded) { 3098 vmx_vmcs_access_done(vmx, vcpu); 3099 } 3100 return (err); 3101 } 3102 3103 static int 3104 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3105 { 3106 struct vmx *vmx = arg; 3107 uint64_t *regp; 3108 3109 /* VMCS access not required for ctx writes */ 3110 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3111 *regp = val; 3112 return (0); 3113 } 3114 3115 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3116 int err = 0; 3117 3118 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3119 if (val != 0) { 3120 /* 3121 * Forcing the vcpu into an interrupt shadow is not 3122 * presently supported. 3123 */ 3124 err = EINVAL; 3125 } else { 3126 uint64_t gi; 3127 3128 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3129 gi &= ~HWINTR_BLOCKING; 3130 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 3131 err = 0; 3132 } 3133 } else { 3134 uint32_t encoding; 3135 3136 err = 0; 3137 encoding = vmcs_field_encoding(reg); 3138 switch (encoding) { 3139 case VMCS_GUEST_IA32_EFER: 3140 vmcs_write(encoding, val); 3141 vmx_sync_efer_state(vmx, vcpu, val); 3142 break; 3143 case VMCS_GUEST_CR0: 3144 /* 3145 * The guest is not allowed to modify certain bits in 3146 * %cr0 and %cr4. To maintain the illusion of full 3147 * control, they have shadow versions which contain the 3148 * guest-perceived (via reads from the register) values 3149 * as opposed to the guest-effective values. 3150 * 3151 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 3152 */ 3153 vmcs_write(VMCS_CR0_SHADOW, val); 3154 vmcs_write(encoding, vmx_fix_cr0(val)); 3155 break; 3156 case VMCS_GUEST_CR4: 3157 /* See above for detail on %cr4 shadowing */ 3158 vmcs_write(VMCS_CR4_SHADOW, val); 3159 vmcs_write(encoding, vmx_fix_cr4(val)); 3160 break; 3161 case VMCS_GUEST_CR3: 3162 vmcs_write(encoding, val); 3163 /* 3164 * Invalidate the guest vcpu's TLB mappings to emulate 3165 * the behavior of updating %cr3. 3166 * 3167 * XXX the processor retains global mappings when %cr3 3168 * is updated but vmx_invvpid() does not. 
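 *
 * Over-invalidation of this sort is safe, if potentially slower
 * than necessary: no stale translation survives, and global
 * entries are simply refetched on next use.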
3169 */ 3170 vmx_invvpid(vmx, vcpu, 3171 vcpu_is_running(vmx->vm, vcpu, NULL)); 3172 break; 3173 case VMCS_INVALID_ENCODING: 3174 err = EINVAL; 3175 break; 3176 default: 3177 vmcs_write(encoding, val); 3178 break; 3179 } 3180 } 3181 3182 if (vmcs_loaded) { 3183 vmx_vmcs_access_done(vmx, vcpu); 3184 } 3185 return (err); 3186 } 3187 3188 static int 3189 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3190 { 3191 struct vmx *vmx = arg; 3192 uint32_t base, limit, access; 3193 3194 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3195 3196 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3197 desc->base = vmcs_read(base); 3198 desc->limit = vmcs_read(limit); 3199 if (access != VMCS_INVALID_ENCODING) { 3200 desc->access = vmcs_read(access); 3201 } else { 3202 desc->access = 0; 3203 } 3204 3205 if (vmcs_loaded) { 3206 vmx_vmcs_access_done(vmx, vcpu); 3207 } 3208 return (0); 3209 } 3210 3211 static int 3212 vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc) 3213 { 3214 struct vmx *vmx = arg; 3215 uint32_t base, limit, access; 3216 3217 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3218 3219 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3220 vmcs_write(base, desc->base); 3221 vmcs_write(limit, desc->limit); 3222 if (access != VMCS_INVALID_ENCODING) { 3223 vmcs_write(access, desc->access); 3224 } 3225 3226 if (vmcs_loaded) { 3227 vmx_vmcs_access_done(vmx, vcpu); 3228 } 3229 return (0); 3230 } 3231 3232 static uint64_t * 3233 vmx_msr_ptr(struct vmx *vmx, int vcpu, uint32_t msr) 3234 { 3235 uint64_t *guest_msrs = vmx->guest_msrs[vcpu]; 3236 3237 switch (msr) { 3238 case MSR_LSTAR: 3239 return (&guest_msrs[IDX_MSR_LSTAR]); 3240 case MSR_CSTAR: 3241 return (&guest_msrs[IDX_MSR_CSTAR]); 3242 case MSR_STAR: 3243 return (&guest_msrs[IDX_MSR_STAR]); 3244 case MSR_SF_MASK: 3245 return (&guest_msrs[IDX_MSR_SF_MASK]); 3246 case MSR_KGSBASE: 3247 return (&guest_msrs[IDX_MSR_KGSBASE]); 3248 case MSR_PAT: 3249 return (&guest_msrs[IDX_MSR_PAT]); 3250 default: 3251 return (NULL); 3252 } 3253 } 3254 3255 static int 3256 vmx_msr_get(void *arg, int vcpu, uint32_t msr, uint64_t *valp) 3257 { 3258 struct vmx *vmx = arg; 3259 3260 ASSERT(valp != NULL); 3261 3262 const uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr); 3263 if (msrp != NULL) { 3264 *valp = *msrp; 3265 return (0); 3266 } 3267 3268 const uint32_t vmcs_enc = vmcs_msr_encoding(msr); 3269 if (vmcs_enc != VMCS_INVALID_ENCODING) { 3270 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3271 3272 *valp = vmcs_read(vmcs_enc); 3273 3274 if (vmcs_loaded) { 3275 vmx_vmcs_access_done(vmx, vcpu); 3276 } 3277 return (0); 3278 } 3279 3280 return (EINVAL); 3281 } 3282 3283 static int 3284 vmx_msr_set(void *arg, int vcpu, uint32_t msr, uint64_t val) 3285 { 3286 struct vmx *vmx = arg; 3287 3288 /* TODO: mask value */ 3289 3290 uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr); 3291 if (msrp != NULL) { 3292 *msrp = val; 3293 return (0); 3294 } 3295 3296 const uint32_t vmcs_enc = vmcs_msr_encoding(msr); 3297 if (vmcs_enc != VMCS_INVALID_ENCODING) { 3298 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3299 3300 vmcs_write(vmcs_enc, val); 3301 3302 if (msr == MSR_EFER) { 3303 vmx_sync_efer_state(vmx, vcpu, val); 3304 } 3305 3306 if (vmcs_loaded) { 3307 vmx_vmcs_access_done(vmx, vcpu); 3308 } 3309 return (0); 3310 } 3311 return (EINVAL); 3312 } 3313 3314 static int 3315 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3316 { 3317 struct vmx *vmx = arg; 3318 int vcap; 3319 int ret; 3320 3321 ret = ENOENT; 3322 3323 
vcap = vmx->cap[vcpu].set; 3324 3325 switch (type) { 3326 case VM_CAP_HALT_EXIT: 3327 ret = 0; 3328 break; 3329 case VM_CAP_PAUSE_EXIT: 3330 if (cap_pause_exit) 3331 ret = 0; 3332 break; 3333 case VM_CAP_MTRAP_EXIT: 3334 if (cap_monitor_trap) 3335 ret = 0; 3336 break; 3337 case VM_CAP_ENABLE_INVPCID: 3338 if (cap_invpcid) 3339 ret = 0; 3340 break; 3341 case VM_CAP_BPT_EXIT: 3342 ret = 0; 3343 break; 3344 default: 3345 break; 3346 } 3347 3348 if (ret == 0) 3349 *retval = (vcap & (1 << type)) ? 1 : 0; 3350 3351 return (ret); 3352 } 3353 3354 static int 3355 vmx_setcap(void *arg, int vcpu, int type, int val) 3356 { 3357 struct vmx *vmx = arg; 3358 uint32_t baseval, reg, flag; 3359 uint32_t *pptr; 3360 int error; 3361 3362 error = ENOENT; 3363 pptr = NULL; 3364 3365 switch (type) { 3366 case VM_CAP_HALT_EXIT: 3367 error = 0; 3368 pptr = &vmx->cap[vcpu].proc_ctls; 3369 baseval = *pptr; 3370 flag = PROCBASED_HLT_EXITING; 3371 reg = VMCS_PRI_PROC_BASED_CTLS; 3372 break; 3373 case VM_CAP_MTRAP_EXIT: 3374 if (cap_monitor_trap) { 3375 error = 0; 3376 pptr = &vmx->cap[vcpu].proc_ctls; 3377 baseval = *pptr; 3378 flag = PROCBASED_MTF; 3379 reg = VMCS_PRI_PROC_BASED_CTLS; 3380 } 3381 break; 3382 case VM_CAP_PAUSE_EXIT: 3383 if (cap_pause_exit) { 3384 error = 0; 3385 pptr = &vmx->cap[vcpu].proc_ctls; 3386 baseval = *pptr; 3387 flag = PROCBASED_PAUSE_EXITING; 3388 reg = VMCS_PRI_PROC_BASED_CTLS; 3389 } 3390 break; 3391 case VM_CAP_ENABLE_INVPCID: 3392 if (cap_invpcid) { 3393 error = 0; 3394 pptr = &vmx->cap[vcpu].proc_ctls2; 3395 baseval = *pptr; 3396 flag = PROCBASED2_ENABLE_INVPCID; 3397 reg = VMCS_SEC_PROC_BASED_CTLS; 3398 } 3399 break; 3400 case VM_CAP_BPT_EXIT: 3401 error = 0; 3402 3403 /* Don't change the bitmap if we are tracing all exceptions. */ 3404 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3405 pptr = &vmx->cap[vcpu].exc_bitmap; 3406 baseval = *pptr; 3407 flag = (1 << IDT_BP); 3408 reg = VMCS_EXCEPTION_BITMAP; 3409 } 3410 break; 3411 default: 3412 break; 3413 } 3414 3415 if (error != 0) { 3416 return (error); 3417 } 3418 3419 if (pptr != NULL) { 3420 if (val) { 3421 baseval |= flag; 3422 } else { 3423 baseval &= ~flag; 3424 } 3425 vmcs_load(vmx->vmcs_pa[vcpu]); 3426 vmcs_write(reg, baseval); 3427 vmcs_clear(vmx->vmcs_pa[vcpu]); 3428 3429 /* 3430 * Update optional stored flags, and record 3431 * setting 3432 */ 3433 *pptr = baseval; 3434 } 3435 3436 if (val) { 3437 vmx->cap[vcpu].set |= (1 << type); 3438 } else { 3439 vmx->cap[vcpu].set &= ~(1 << type); 3440 } 3441 3442 return (0); 3443 } 3444 3445 struct vlapic_vtx { 3446 struct vlapic vlapic; 3447 3448 /* Align to the nearest cacheline */ 3449 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)]; 3450 3451 /* TMR handling state for posted interrupts */ 3452 uint32_t tmr_active[8]; 3453 uint32_t pending_level[8]; 3454 uint32_t pending_edge[8]; 3455 3456 struct pir_desc *pir_desc; 3457 struct vmx *vmx; 3458 uint_t pending_prio; 3459 boolean_t tmr_sync; 3460 }; 3461 3462 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0); 3463 3464 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3465 3466 static vcpu_notify_t 3467 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level) 3468 { 3469 struct vlapic_vtx *vlapic_vtx; 3470 struct pir_desc *pir_desc; 3471 uint32_t mask, tmrval; 3472 int idx; 3473 vcpu_notify_t notify = VCPU_NOTIFY_NONE; 3474 3475 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3476 pir_desc = vlapic_vtx->pir_desc; 3477 idx = vector / 32; 3478 mask = 1UL << (vector % 32); 3479 3480 /* 3481 * If the currently asserted TMRs 
do not match the state requested by
3482	 * the incoming interrupt, an exit will be required to reconcile those
3483	 * bits in the APIC page. This will keep the vLAPIC behavior in line
3484	 * with the architecturally defined expectations.
3485	 *
3486	 * If actors of mixed types (edge and level) are racing against the same
3487	 * vector (toggling its TMR bit back and forth), the results could be
3488	 * inconsistent. Such circumstances are considered a rare edge case and
3489	 * are never expected to be found in the wild.
3490	 */
3491	tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]);
3492	if (!level) {
3493		if ((tmrval & mask) != 0) {
3494			/* Edge-triggered interrupt needs TMR de-asserted */
3495			atomic_set_int(&vlapic_vtx->pending_edge[idx], mask);
3496			atomic_store_rel_long(&pir_desc->pending, 1);
3497			return (VCPU_NOTIFY_EXIT);
3498		}
3499	} else {
3500		if ((tmrval & mask) == 0) {
3501			/* Level-triggered interrupt needs TMR asserted */
3502			atomic_set_int(&vlapic_vtx->pending_level[idx], mask);
3503			atomic_store_rel_long(&pir_desc->pending, 1);
3504			return (VCPU_NOTIFY_EXIT);
3505		}
3506	}
3507
3508	/*
3509	 * If the interrupt request does not require manipulation of the TMRs
3510	 * for delivery, set it in the PIR descriptor. It cannot be inserted
3511	 * into the APIC page while the vCPU might be running.
3512	 */
3513	atomic_set_int(&pir_desc->pir[idx], mask);
3514
3515	/*
3516	 * A notification is required whenever the 'pending' bit makes a
3517	 * transition from 0->1.
3518	 *
3519	 * Even if the 'pending' bit is already asserted, notification about
3520	 * the incoming interrupt may still be necessary. For example, if a
3521	 * vCPU is HLTed with a high PPR, a low priority interrupt would cause
3522	 * the 0->1 'pending' transition with a notification, but the vCPU
3523	 * would ignore the interrupt for the time being. The same vCPU would
3524	 * need to then be notified if a high-priority interrupt arrived which
3525	 * satisfied the PPR.
3526	 *
3527	 * The priorities of interrupts injected while 'pending' is asserted
3528	 * are tracked in a custom bitfield 'pending_prio'. Should the
3529	 * to-be-injected interrupt exceed the priorities already present, the
3530	 * notification is sent. The priorities recorded in 'pending_prio' are
3531	 * cleared whenever the 'pending' bit makes another 0->1 transition.
3532	 */
3533	if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) {
3534		notify = VCPU_NOTIFY_APIC;
3535		vlapic_vtx->pending_prio = 0;
3536	} else {
3537		const uint_t old_prio = vlapic_vtx->pending_prio;
3538		const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
3539
3540		if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) {
3541			atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
3542			notify = VCPU_NOTIFY_APIC;
3543		}
3544	}
3545
3546	return (notify);
3547 }
3548
3549 static void
3550 vmx_apicv_accepted(struct vlapic *vlapic, int vector)
3551 {
3552	/*
3553	 * When APICv is enabled for an instance, the traditional interrupt
3554	 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not
3555	 * used and the CPU does the heavy lifting of virtual interrupt
3556	 * delivery. For that reason vmx_intr_accepted() should never be called
3557	 * when APICv is enabled.
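	 *
	 * (The panic message below refers to this function by the name of
	 * the 'intr_accepted' vlapic ops slot it is installed into.)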
3558 */ 3559 panic("vmx_intr_accepted: not expected to be called"); 3560 } 3561 3562 static void 3563 vmx_apicv_sync_tmr(struct vlapic *vlapic) 3564 { 3565 struct vlapic_vtx *vlapic_vtx; 3566 const uint32_t *tmrs; 3567 3568 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3569 tmrs = &vlapic_vtx->tmr_active[0]; 3570 3571 if (!vlapic_vtx->tmr_sync) { 3572 return; 3573 } 3574 3575 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]); 3576 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]); 3577 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]); 3578 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]); 3579 vlapic_vtx->tmr_sync = B_FALSE; 3580 } 3581 3582 static void 3583 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3584 { 3585 struct vmx *vmx; 3586 uint32_t proc_ctls; 3587 int vcpuid; 3588 3589 vcpuid = vlapic->vcpuid; 3590 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3591 3592 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3593 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3594 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3595 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3596 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3597 3598 vmcs_load(vmx->vmcs_pa[vcpuid]); 3599 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3600 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3601 } 3602 3603 static void 3604 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3605 { 3606 struct vmx *vmx; 3607 uint32_t proc_ctls2; 3608 int vcpuid; 3609 3610 vcpuid = vlapic->vcpuid; 3611 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3612 3613 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3614 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3615 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3616 3617 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3618 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3619 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3620 3621 vmcs_load(vmx->vmcs_pa[vcpuid]); 3622 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3623 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3624 3625 vmx_allow_x2apic_msrs(vmx, vcpuid); 3626 } 3627 3628 static void 3629 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu) 3630 { 3631 psm_send_pir_ipi(hostcpu); 3632 } 3633 3634 static void 3635 vmx_apicv_sync(struct vlapic *vlapic) 3636 { 3637 struct vlapic_vtx *vlapic_vtx; 3638 struct pir_desc *pir_desc; 3639 struct LAPIC *lapic; 3640 uint_t i; 3641 3642 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3643 pir_desc = vlapic_vtx->pir_desc; 3644 lapic = vlapic->apic_page; 3645 3646 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3647 return; 3648 } 3649 3650 vlapic_vtx->pending_prio = 0; 3651 3652 /* Make sure the invalid (0-15) vectors are not set */ 3653 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff); 3654 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff); 3655 ASSERT0(pir_desc->pir[0] & 0xffff); 3656 3657 for (i = 0; i <= 7; i++) { 3658 uint32_t *tmrp = &lapic->tmr0 + (i * 4); 3659 uint32_t *irrp = &lapic->irr0 + (i * 4); 3660 3661 const uint32_t pending_level = 3662 atomic_readandclear_int(&vlapic_vtx->pending_level[i]); 3663 const uint32_t pending_edge = 3664 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]); 3665 const uint32_t pending_inject = 3666 atomic_readandclear_int(&pir_desc->pir[i]); 3667 3668 if (pending_level != 0) { 3669 /* 3670 * Level-triggered interrupts assert their corresponding 3671 * bit in the TMR when queued in IRR. 
3672 */ 3673 *tmrp |= pending_level; 3674 *irrp |= pending_level; 3675 } 3676 if (pending_edge != 0) { 3677 /* 3678 * When queuing an edge-triggered interrupt in IRR, the 3679 * corresponding bit in the TMR is cleared. 3680 */ 3681 *tmrp &= ~pending_edge; 3682 *irrp |= pending_edge; 3683 } 3684 if (pending_inject != 0) { 3685 /* 3686 * Interrupts which do not require a change to the TMR 3687 * (because it already matches the necessary state) can 3688 * simply be queued in IRR. 3689 */ 3690 *irrp |= pending_inject; 3691 } 3692 3693 if (*tmrp != vlapic_vtx->tmr_active[i]) { 3694 /* Check if VMX EOI triggers require updating. */ 3695 vlapic_vtx->tmr_active[i] = *tmrp; 3696 vlapic_vtx->tmr_sync = B_TRUE; 3697 } 3698 } 3699 } 3700 3701 static void 3702 vmx_tpr_shadow_enter(struct vlapic *vlapic) 3703 { 3704 /* 3705 * When TPR shadowing is enabled, VMX will initiate a guest exit if its 3706 * TPR falls below a threshold priority. That threshold is set to the 3707 * current TPR priority, since guest interrupt status should be 3708 * re-evaluated if its TPR is set lower. 3709 */ 3710 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3711 } 3712 3713 static void 3714 vmx_tpr_shadow_exit(struct vlapic *vlapic) 3715 { 3716 /* 3717 * Unlike full APICv, where changes to the TPR are reflected in the PPR, 3718 * with TPR shadowing, that duty is relegated to the VMM. Upon exit, 3719 * the PPR is updated to reflect any change in the TPR here. 3720 */ 3721 vlapic_sync_tpr(vlapic); 3722 } 3723 3724 static struct vlapic * 3725 vmx_vlapic_init(void *arg, int vcpuid) 3726 { 3727 struct vmx *vmx = arg; 3728 struct vlapic_vtx *vlapic_vtx; 3729 struct vlapic *vlapic; 3730 3731 vlapic_vtx = kmem_zalloc(sizeof (struct vlapic_vtx), KM_SLEEP); 3732 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3733 vlapic_vtx->vmx = vmx; 3734 3735 vlapic = &vlapic_vtx->vlapic; 3736 vlapic->vm = vmx->vm; 3737 vlapic->vcpuid = vcpuid; 3738 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3739 3740 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 3741 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3742 } 3743 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 3744 vlapic->ops.set_intr_ready = vmx_apicv_set_ready; 3745 vlapic->ops.sync_state = vmx_apicv_sync; 3746 vlapic->ops.intr_accepted = vmx_apicv_accepted; 3747 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3748 3749 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 3750 vlapic->ops.post_intr = vmx_apicv_notify; 3751 } 3752 } 3753 3754 vlapic_init(vlapic); 3755 3756 return (vlapic); 3757 } 3758 3759 static void 3760 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3761 { 3762 vlapic_cleanup(vlapic); 3763 kmem_free(vlapic, sizeof (struct vlapic_vtx)); 3764 } 3765 3766 static void 3767 vmx_savectx(void *arg, int vcpu) 3768 { 3769 struct vmx *vmx = arg; 3770 3771 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3772 vmcs_clear(vmx->vmcs_pa[vcpu]); 3773 vmx_msr_guest_exit(vmx, vcpu); 3774 /* 3775 * Having VMCLEARed the VMCS, it can no longer be re-entered 3776 * with VMRESUME, but must be VMLAUNCHed again. 
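 *
 * (Clearing VS_LAUNCHED below is what steers the next pass through
 * vmx_run() to request VMLAUNCH rather than VMRESUME from
 * vmx_enter_guest().)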
3777 */ 3778 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED; 3779 } 3780 3781 reset_gdtr_limit(); 3782 } 3783 3784 static void 3785 vmx_restorectx(void *arg, int vcpu) 3786 { 3787 struct vmx *vmx = arg; 3788 3789 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED); 3790 3791 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3792 vmx_msr_guest_enter(vmx, vcpu); 3793 vmcs_load(vmx->vmcs_pa[vcpu]); 3794 } 3795 } 3796 3797 struct vmm_ops vmm_ops_intel = { 3798 .init = vmx_init, 3799 .cleanup = vmx_cleanup, 3800 .resume = vmx_restore, 3801 3802 .vminit = vmx_vminit, 3803 .vmrun = vmx_run, 3804 .vmcleanup = vmx_vmcleanup, 3805 .vmgetreg = vmx_getreg, 3806 .vmsetreg = vmx_setreg, 3807 .vmgetdesc = vmx_getdesc, 3808 .vmsetdesc = vmx_setdesc, 3809 .vmgetcap = vmx_getcap, 3810 .vmsetcap = vmx_setcap, 3811 .vlapic_init = vmx_vlapic_init, 3812 .vlapic_cleanup = vmx_vlapic_cleanup, 3813 3814 .vmsavectx = vmx_savectx, 3815 .vmrestorectx = vmx_restorectx, 3816 3817 .vmgetmsr = vmx_msr_get, 3818 .vmsetmsr = vmx_msr_set, 3819 }; 3820 3821 /* Side-effect free HW validation derived from checks in vmx_init. */ 3822 int 3823 vmx_x86_supported(const char **msg) 3824 { 3825 int error; 3826 uint32_t tmp; 3827 3828 ASSERT(msg != NULL); 3829 3830 /* Check support for primary processor-based VM-execution controls */ 3831 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 3832 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, 3833 PROCBASED_CTLS_ZERO_SETTING, &tmp); 3834 if (error) { 3835 *msg = "processor does not support desired primary " 3836 "processor-based controls"; 3837 return (error); 3838 } 3839 3840 /* Check support for secondary processor-based VM-execution controls */ 3841 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 3842 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, 3843 PROCBASED_CTLS2_ZERO_SETTING, &tmp); 3844 if (error) { 3845 *msg = "processor does not support desired secondary " 3846 "processor-based controls"; 3847 return (error); 3848 } 3849 3850 /* Check support for pin-based VM-execution controls */ 3851 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 3852 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, 3853 PINBASED_CTLS_ZERO_SETTING, &tmp); 3854 if (error) { 3855 *msg = "processor does not support desired pin-based controls"; 3856 return (error); 3857 } 3858 3859 /* Check support for VM-exit controls */ 3860 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 3861 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp); 3862 if (error) { 3863 *msg = "processor does not support desired exit controls"; 3864 return (error); 3865 } 3866 3867 /* Check support for VM-entry controls */ 3868 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 3869 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp); 3870 if (error) { 3871 *msg = "processor does not support desired entry controls"; 3872 return (error); 3873 } 3874 3875 /* Unrestricted guest is nominally optional, but not for us. */ 3876 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 3877 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp); 3878 if (error) { 3879 *msg = "processor does not support desired unrestricted guest " 3880 "controls"; 3881 return (error); 3882 } 3883 3884 return (0); 3885 } 3886
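
/*
 * As an illustration of vmx_x86_supported() in use, a hypothetical
 * caller probing for VMX support before attaching might look like the
 * sketch below. The cmn_err() reporting and ENOTSUP return value are
 * illustrative choices, not code from this file:
 *
 *	const char *msg = NULL;
 *
 *	if (vmx_x86_supported(&msg) != 0) {
 *		cmn_err(CE_NOTE, "VMX not usable: %s", msg);
 *		return (ENOTSUP);
 *	}
 */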