/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2015 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 * Copyright 2022 MNX Cloud, Inc.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/smt.h>
#include <sys/hma.h>
#include <sys/trap.h>
#include <sys/archsystm.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>
#include <sys/vmm_vm.h>
#include <sys/vmm_kernel.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <sys/vmm_instruction_emul.h>
#include "vmm_lapic.h"
#include "vmm_host.h"
#include "vmm_ioport.h"
#include "vmm_stat.h"
#include "vatpic.h"
#include "vlapic.h"
#include "vlapic_priv.h"

#include "vmcs.h"
#include "vmx.h"
#include "vmx_msr.h"
#include "vmx_controls.h"

#define PINBASED_CTLS_ONE_SETTING \
	(PINBASED_EXTINT_EXITING | \
	PINBASED_NMI_EXITING | \
	PINBASED_VIRTUAL_NMI)
#define PINBASED_CTLS_ZERO_SETTING 0

#define PROCBASED_CTLS_WINDOW_SETTING \
	(PROCBASED_INT_WINDOW_EXITING | \
	PROCBASED_NMI_WINDOW_EXITING)

/*
 * Distinct from FreeBSD bhyve, we consider several additional proc-based
 * controls necessary:
 * - TSC offsetting
 * - HLT exiting
 */
#define PROCBASED_CTLS_ONE_SETTING \
	(PROCBASED_SECONDARY_CONTROLS | \
	PROCBASED_TSC_OFFSET | \
	PROCBASED_HLT_EXITING | \
	PROCBASED_MWAIT_EXITING | \
	PROCBASED_MONITOR_EXITING | \
	PROCBASED_IO_EXITING | \
	PROCBASED_MSR_BITMAPS | \
	PROCBASED_CTLS_WINDOW_SETTING | \
	PROCBASED_CR8_LOAD_EXITING | \
	PROCBASED_CR8_STORE_EXITING)

#define PROCBASED_CTLS_ZERO_SETTING \
	(PROCBASED_CR3_LOAD_EXITING | \
	PROCBASED_CR3_STORE_EXITING | \
	PROCBASED_IO_BITMAPS)

/*
 * EPT and Unrestricted Guest are considered necessities.  The latter is not a
 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
 * without a bootrom starting in real mode.
 */
#define PROCBASED_CTLS2_ONE_SETTING \
	(PROCBASED2_ENABLE_EPT | \
	PROCBASED2_UNRESTRICTED_GUEST)
#define PROCBASED_CTLS2_ZERO_SETTING 0

#define VM_EXIT_CTLS_ONE_SETTING \
	(VM_EXIT_SAVE_DEBUG_CONTROLS | \
	VM_EXIT_HOST_LMA | \
	VM_EXIT_LOAD_PAT | \
	VM_EXIT_SAVE_EFER | \
	VM_EXIT_LOAD_EFER | \
	VM_EXIT_ACKNOWLEDGE_INTERRUPT)

#define VM_EXIT_CTLS_ZERO_SETTING 0

#define VM_ENTRY_CTLS_ONE_SETTING \
	(VM_ENTRY_LOAD_DEBUG_CONTROLS | \
	VM_ENTRY_LOAD_EFER)

#define VM_ENTRY_CTLS_ZERO_SETTING \
	(VM_ENTRY_INTO_SMM | \
	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

/*
 * Cover the EPT capabilities used by bhyve at present:
 * - 4-level page walks
 * - write-back memory type
 * - INVEPT operations (all types)
 * - INVVPID operations (single-context only)
 */
#define EPT_CAPS_REQUIRED \
	(IA32_VMX_EPT_VPID_PWL4 | \
	IA32_VMX_EPT_VPID_TYPE_WB | \
	IA32_VMX_EPT_VPID_INVEPT | \
	IA32_VMX_EPT_VPID_INVEPT_SINGLE | \
	IA32_VMX_EPT_VPID_INVEPT_ALL | \
	IA32_VMX_EPT_VPID_INVVPID | \
	IA32_VMX_EPT_VPID_INVVPID_SINGLE)

#define HANDLED 1
#define UNHANDLED 0

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

/*
 * TSC scaling related constants
 */
#define INTEL_TSCM_INT_SIZE	16
#define INTEL_TSCM_FRAC_SIZE	48

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;

static uint64_t cr4_ones_mask, cr4_zeros_mask;

static int vmx_initialized;

/*
 * Optional capabilities
 */

/* PAUSE triggers a VM-exit */
static int cap_pause_exit;

/* WBINVD triggers a VM-exit */
static int cap_wbinvd_exit;

/* Monitor trap flag */
static int cap_monitor_trap;

/* Guests are allowed to use INVPCID */
static int cap_invpcid;

/* Extra capabilities (VMX_CAP_*) beyond the minimum */
static enum vmx_caps vmx_capabilities;

/* APICv posted interrupt vector */
static int pirvec = -1;

static uint_t vpid_alloc_failed;

int guest_l1d_flush;
int guest_l1d_flush_sw;

/* MSR save region is composed of an array of 'struct msr_entry' */
struct msr_entry {
	uint32_t index;
	uint32_t reserved;
	uint64_t val;
};

static struct msr_entry msr_load_list[1] __aligned(16);

/*
 * The definitions of SDT probes for VMX.
 */

/* BEGIN CSTYLED */
SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
    "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");

SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
    "struct vmx *", "int", "struct vm_exit *", "uint64_t");

SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
    "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
    "struct vmx *", "int", "struct vm_exit *");

SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
    "struct vmx *", "int", "struct vm_exit *", "uint32_t");

SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
    "struct vmx *", "int", "struct vm_exit *", "int");
/* END CSTYLED */

static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
static void vmx_apply_tsc_adjust(struct vmx *, int);
static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
static void vmx_tpr_shadow_exit(struct vlapic *vlapic);

static void
vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid)
{
	/*
	 * Allow readonly access to the following x2APIC MSRs from the guest.
	 */
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ID);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR);

	for (uint_t i = 0; i < 8; i++) {
		guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i);
		guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i);
	}

	guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER);
	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR);

	/*
	 * Allow the TPR, EOI and SELF_IPI MSRs to be read and written by the
	 * guest.
	 *
	 * These registers get special treatment described in the section
	 * "Virtualizing MSR-Based APIC Accesses".
	 */
	guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI);
	guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI);
}

static ulong_t
vmx_fix_cr0(ulong_t cr0)
{
	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr0 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr0(uint64_t cr0, uint64_t shadow)
{
	return ((cr0 & ~cr0_ones_mask) |
	    (shadow & (cr0_zeros_mask | cr0_ones_mask)));
}

static ulong_t
vmx_fix_cr4(ulong_t cr4)
{
	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
}

/*
 * Given a live (VMCS-active) cr4 value, and its shadow counterpart, calculate
 * the value observable from the guest.
 */
static ulong_t
vmx_unshadow_cr4(uint64_t cr4, uint64_t shadow)
{
	return ((cr4 & ~cr4_ones_mask) |
	    (shadow & (cr4_zeros_mask | cr4_ones_mask)));
}

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
	 * the unit number allocator.
	 */

	if (vpid > VM_MAXCPU)
		hma_vmx_vpid_free((uint16_t)vpid);
}

static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		uint16_t tmp;

		tmp = hma_vmx_vpid_alloc();
		x = (tmp == 0) ? -1 : tmp;

		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}

static int
vmx_cleanup(void)
{
	/* This is taken care of by the hma registration */
	return (0);
}

static void
vmx_restore(void)
{
	/* No-op on illumos */
}

static int
vmx_init(void)
{
	int error;
	uint64_t fixed0, fixed1;
	uint32_t tmp;
	enum vmx_caps avail_caps = VMX_CAP_NONE;

	/* Check support for primary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_CTLS_ONE_SETTING,
	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired primary "
		    "processor-based controls\n");
		return (error);
	}

	/*
	 * Clear interrupt-window/NMI-window exiting from the default proc-based
	 * controls.  They are set and cleared based on runtime vCPU events.
	 */
	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

	/* Check support for secondary processor-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED_CTLS2_ONE_SETTING,
	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
	if (error) {
		printf("vmx_init: processor does not support desired secondary "
		    "processor-based controls\n");
		return (error);
	}

	/* Check support for VPID */
	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_ENABLE_VPID,
	    0, &tmp);
	if (error == 0)
		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

	/* Check support for pin-based VM-execution controls */
	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
	    MSR_VMX_TRUE_PINBASED_CTLS,
	    PINBASED_CTLS_ONE_SETTING,
	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "pin-based controls\n");
		return (error);
	}

	/* Check support for VM-exit controls */
	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
	    VM_EXIT_CTLS_ONE_SETTING,
	    VM_EXIT_CTLS_ZERO_SETTING,
	    &exit_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "exit controls\n");
		return (error);
	}

	/* Check support for VM-entry controls */
	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
	    &entry_ctls);
	if (error) {
		printf("vmx_init: processor does not support desired "
		    "entry controls\n");
		return (error);
	}

	/*
	 * Check support for optional features by testing them
	 * as individual bits
	 */
	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_PROCBASED_CTLS,
	    PROCBASED_MTF, 0,
	    &tmp) == 0);

	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
	    MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_PAUSE_EXITING, 0,
	    &tmp) == 0);

	cap_wbinvd_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2,
	    PROCBASED2_WBINVD_EXITING, 0,
	    &tmp) == 0);

	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
	    &tmp) == 0);

	/*
	 * Check for APIC virtualization capabilities:
	 * - TPR shadowing
	 * - Full APICv (with or without x2APIC support)
	 * - Posted interrupt handling
	 */
	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
		avail_caps |= VMX_CAP_TPR_SHADOW;

		const uint32_t apicv_bits =
		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
			avail_caps |= VMX_CAP_APICV;

			/*
			 * It may make sense in the future to differentiate
			 * hardware (or software) configurations with APICv but
			 * no support for accelerating x2APIC mode.
			 */
			avail_caps |= VMX_CAP_APICV_X2APIC;

			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
			    MSR_VMX_TRUE_PINBASED_CTLS,
			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
			if (error == 0) {
				/*
				 * If the PSM-provided interfaces for requesting
				 * and using a PIR IPI vector are present, use
				 * them for posted interrupts.
				 */
				if (psm_get_pir_ipivect != NULL &&
				    psm_send_pir_ipi != NULL) {
					pirvec = psm_get_pir_ipivect();
					avail_caps |= VMX_CAP_APICV_PIR;
				}
			}
		}
	}

	/*
	 * Check for necessary EPT capabilities
	 *
	 * TODO: Properly handle when IA32_VMX_EPT_VPID_HW_AD is missing and the
	 * hypervisor intends to utilize dirty page tracking.
	 */
	uint64_t ept_caps = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
	if ((ept_caps & EPT_CAPS_REQUIRED) != EPT_CAPS_REQUIRED) {
		cmn_err(CE_WARN, "!Inadequate EPT capabilities: %lx", ept_caps);
		return (EINVAL);
	}

#ifdef __FreeBSD__
	guest_l1d_flush = (cpu_ia32_arch_caps &
	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);

	/*
	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
	 * available.  Otherwise fall back to the software flush
	 * method which loads enough data from the kernel text to
	 * flush existing L1D content, both on VMX entry and on NMI
	 * return.
	 */
	if (guest_l1d_flush) {
		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
			guest_l1d_flush_sw = 1;
			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
			    &guest_l1d_flush_sw);
		}
		if (guest_l1d_flush_sw) {
			if (nmi_flush_l1d_sw <= 1)
				nmi_flush_l1d_sw = 1;
		} else {
			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
		}
	}
#else
	/* L1D flushing is taken care of by smt_acquire() and friends */
	guest_l1d_flush = 0;
#endif /* __FreeBSD__ */

	/*
	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
	 */
	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
	cr0_ones_mask = fixed0 & fixed1;
	cr0_zeros_mask = ~fixed0 & ~fixed1;

	/*
	 * Since Unrestricted Guest was already verified present, CR0_PE and
	 * CR0_PG are allowed to be set to zero in VMX non-root operation
	 */
	cr0_ones_mask &= ~(CR0_PG | CR0_PE);

	/*
	 * Do not allow the guest to set CR0_NW or CR0_CD.
	 */
	cr0_zeros_mask |= (CR0_NW | CR0_CD);

	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
	cr4_ones_mask = fixed0 & fixed1;
	cr4_zeros_mask = ~fixed0 & ~fixed1;

	vmx_msr_init();

	vmx_capabilities = avail_caps;
	vmx_initialized = 1;

	return (0);
}

static void
vmx_trigger_hostintr(int vector)
{
	VERIFY(vector >= 32 && vector <= 255);
	vmx_call_isr(vector - 32);
}

static void *
vmx_vminit(struct vm *vm)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error, datasel;
	struct vmx *vmx;
	uint32_t exc_bitmap;
	uint16_t maxcpus;
	uint32_t proc_ctls, proc2_ctls, pin_ctls;
	uint64_t apic_access_pa = UINT64_MAX;

	vmx = kmem_zalloc(sizeof (struct vmx), KM_SLEEP);
	VERIFY3U((uintptr_t)vmx & PAGE_MASK, ==, 0);

	vmx->vm = vm;
	vmx->eptp = vmspace_table_root(vm_get_vmspace(vm));

	/*
	 * Clean up EP4TA-tagged guest-physical and combined mappings
	 *
	 * VMX transitions are not required to invalidate any guest physical
	 * mappings.  So, it may be possible for stale guest physical mappings
	 * to be present in the processor TLBs.
	 *
	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
	 */
	hma_vmx_invept_allcpus((uintptr_t)vmx->eptp);

	vmx_msr_bitmap_initialize(vmx);

	vpid_alloc(vpid, VM_MAXCPU);

	/* Grab the established defaults */
	proc_ctls = procbased_ctls;
	proc2_ctls = procbased_ctls2;
	pin_ctls = pinbased_ctls;
	/* For now, default to the available capabilities */
	vmx->vmx_caps = vmx_capabilities;

	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));

		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);

		/*
		 * Allocate a page of memory to back the APIC access address for
		 * when APICv features are in use.  Guest MMIO accesses should
		 * never actually reach this page, but rather be intercepted.
		 */
		vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP);
		VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0);
		apic_access_pa = vtophys(vmx->apic_access_page);

		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
		    apic_access_pa);
		/* XXX this should really return an error to the caller */
		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
	}
	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));

		pin_ctls |= PINBASED_POSTED_INTERRUPT;
	}

	/* Reflect any enabled defaults in the cap set */
	int cap_defaults = 0;
	if ((proc_ctls & PROCBASED_HLT_EXITING) != 0) {
		cap_defaults |= (1 << VM_CAP_HALT_EXIT);
	}
	if ((proc_ctls & PROCBASED_PAUSE_EXITING) != 0) {
		cap_defaults |= (1 << VM_CAP_PAUSE_EXIT);
	}
	if ((proc_ctls & PROCBASED_MTF) != 0) {
		cap_defaults |= (1 << VM_CAP_MTRAP_EXIT);
	}
	if ((proc2_ctls & PROCBASED2_ENABLE_INVPCID) != 0) {
		cap_defaults |= (1 << VM_CAP_ENABLE_INVPCID);
	}

	maxcpus = vm_get_maxcpus(vm);
	datasel = vmm_get_host_datasel();
	for (i = 0; i < maxcpus; i++) {
		/*
		 * Cache physical address lookups for various components which
		 * may be required inside the critical_enter() section implied
		 * by VMPTRLD() below.
		 */
		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]);
		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);

		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);

		vmx_msr_guest_init(vmx, i);

		vmcs_load(vmx->vmcs_pa[i]);

		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());

		/* Load the control registers */
		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);

		/* Load the segment selectors */
		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());

		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);

		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

		/*
		 * Configure host sysenter MSRs to be restored on VM exit.
		 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
		 * vmx_run.
		 */
		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
		    rdmsr(MSR_SYSENTER_EIP_MSR));

		/* instruction pointer */
		vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);

		/* link pointer */
		vmcs_write(VMCS_LINK_POINTER, ~0);

		vmcs_write(VMCS_EPTP, vmx->eptp);
		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);

		uint32_t use_proc2_ctls = proc2_ctls;
		if (cap_wbinvd_exit && vcpu_trap_wbinvd(vm, i) != 0)
			use_proc2_ctls |= PROCBASED2_WBINVD_EXITING;
		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, use_proc2_ctls);

		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
		vmcs_write(VMCS_VPID, vpid[i]);

		if (guest_l1d_flush && !guest_l1d_flush_sw) {
			vmcs_write(VMCS_ENTRY_MSR_LOAD,
			    vtophys(&msr_load_list[0]));
			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
			    nitems(msr_load_list));
			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
		}

		/* exception bitmap */
		if (vcpu_trace_exceptions(vm, i))
			exc_bitmap = 0xffffffff;
		else
			exc_bitmap = 1 << IDT_MC;
		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);

		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);

		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
		}

		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
			vmcs_write(VMCS_APIC_ACCESS, apic_access_pa);
			vmcs_write(VMCS_EOI_EXIT0, 0);
			vmcs_write(VMCS_EOI_EXIT1, 0);
			vmcs_write(VMCS_EOI_EXIT2, 0);
			vmcs_write(VMCS_EOI_EXIT3, 0);
		}
		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
			vmcs_write(VMCS_PIR_VECTOR, pirvec);
			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
		}

		/*
		 * Set up the CR0/4 masks and configure the read shadow state
		 * to the power-on register value from the Intel Sys Arch.
		 *  CR0 - 0x60000010
		 *  CR4 - 0
		 */
		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
		vmcs_write(VMCS_CR4_SHADOW, 0);

		vmcs_clear(vmx->vmcs_pa[i]);

		vmx->cap[i].set = cap_defaults;
		vmx->cap[i].proc_ctls = proc_ctls;
		vmx->cap[i].proc_ctls2 = proc2_ctls;
		vmx->cap[i].exc_bitmap = exc_bitmap;

		vmx->state[i].nextrip = ~0;
		vmx->state[i].lastcpu = NOCPU;
		vmx->state[i].vpid = vpid[i];
	}

	return (vmx);
}

static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");

#define INVVPID_TYPE_ADDRESS		0UL
#define INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t vpid;
	uint16_t _res1;
	uint32_t _res2;
	uint64_t linear_addr;
};
CTASSERT(sizeof (struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	DTRACE_PROBE3(vmx__invvpid, uint64_t, type, uint16_t, desc.vpid,
	    uint64_t, desc.linear_addr);

	__asm __volatile("invvpid %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error) {
		panic("invvpid error %d", error);
	}
}

/*
 * Invalidate guest mappings identified by its VPID from the TLB.
 *
 * This is effectively a flush of the guest TLB, removing only "combined
 * mappings" (to use the VMX parlance).  Actions which modify the EPT structures
 * for the instance (such as unmapping GPAs) would require an 'invept' flush.
 */
static void
vmx_invvpid(struct vmx *vmx, int vcpu, int running)
{
	struct vmxstate *vmxstate;
	struct vmspace *vms;

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->vpid == 0) {
		return;
	}

	if (!running) {
		/*
		 * Set the 'lastcpu' to an invalid host cpu.
		 *
		 * This will invalidate TLB entries tagged with the vcpu's
		 * vpid the next time it runs via vmx_set_pcpu_defaults().
		 */
		vmxstate->lastcpu = NOCPU;
		return;
	}

	/*
	 * Invalidate all mappings tagged with 'vpid'
	 *
	 * This is done when a vCPU moves between host CPUs, where there may be
	 * stale TLB entries for this VPID on the target, or if emulated actions
	 * in the guest CPU have incurred an explicit TLB flush.
	 */
	vms = vm_get_vmspace(vmx->vm);
	if (vmspace_table_gen(vms) == vmx->eptgen[curcpu]) {
		struct invvpid_desc invvpid_desc = {
			.vpid = vmxstate->vpid,
			.linear_addr = 0,
			._res1 = 0,
			._res2 = 0,
		};

		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
	} else {
		/*
		 * The INVVPID can be skipped if an INVEPT is going to be
		 * performed before entering the guest.  The INVEPT will
		 * invalidate combined mappings for the EP4TA associated with
		 * this guest, in all VPIDs.
		 */
		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
	}
}

static __inline void
invept(uint64_t type, uint64_t eptp)
{
	int error;
	struct invept_desc {
		uint64_t eptp;
		uint64_t _resv;
	} desc = { eptp, 0 };

	DTRACE_PROBE2(vmx__invept, uint64_t, type, uint64_t, eptp);

	__asm __volatile("invept %[desc], %[type];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [desc] "m" (desc), [type] "r" (type)
	    : "memory");

	if (error != 0) {
		panic("invept error %d", error);
	}
}

static void
vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
{
	struct vmxstate *vmxstate;

	/*
	 * Regardless of whether the VM appears to have migrated between CPUs,
	 * save the host sysenter stack pointer.  As it points to the kernel
	 * stack of each thread, the correct value must be maintained for every
	 * trip into the critical section.
	 */
	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));

	/*
	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
	 * migration between host CPUs with differing TSC values.
	 */
	vmx_apply_tsc_adjust(vmx, vcpu);

	vmxstate = &vmx->state[vcpu];
	if (vmxstate->lastcpu == curcpu)
		return;

	vmxstate->lastcpu = curcpu;

	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

	/* Load the per-CPU IDT address */
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
	vmx_invvpid(vmx, vcpu, 1);
}

static __inline bool
vmx_int_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_int_window_exiting(vmx, vcpu)) {
		/* Enable interrupt window exiting */
		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
{
	/* Disable interrupt window exiting */
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

static __inline bool
vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
}

static __inline void
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
	}
}

static __inline void
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
{
	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
}

/*
 * Set the TSC adjustment, taking into account the offsets measured between
 * host physical CPUs.  This is required even if the guest has not set a TSC
 * offset since vCPUs inherit the TSC offset of whatever physical CPU it has
 * migrated onto.  Without this mitigation, un-synched host TSCs will convey
 * the appearance of TSC time-travel to the guest as its vCPUs migrate.
 */
static void
vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
{
	const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);

	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);

	if (vmx->tsc_offset_active[vcpu] != offset) {
		vmcs_write(VMCS_TSC_OFFSET, offset);
		vmx->tsc_offset_active[vcpu] = offset;
	}
}

CTASSERT(VMCS_INTR_T_HWINTR == VM_INTINFO_HWINTR);
CTASSERT(VMCS_INTR_T_NMI == VM_INTINFO_NMI);
CTASSERT(VMCS_INTR_T_HWEXCEPTION == VM_INTINFO_HWEXCP);
CTASSERT(VMCS_INTR_T_SWINTR == VM_INTINFO_SWINTR);
CTASSERT(VMCS_INTR_T_PRIV_SWEXCEPTION == VM_INTINFO_RESV5);
CTASSERT(VMCS_INTR_T_SWEXCEPTION == VM_INTINFO_RESV6);
CTASSERT(VMCS_IDT_VEC_ERRCODE_VALID == VM_INTINFO_DEL_ERRCODE);
CTASSERT(VMCS_INTR_T_MASK == VM_INTINFO_MASK_TYPE);

static uint64_t
vmx_idtvec_to_intinfo(uint32_t info, uint32_t errcode)
{
	ASSERT(info & VMCS_IDT_VEC_VALID);

	const uint32_t type = info & VMCS_INTR_T_MASK;
	const uint8_t vec = info & 0xff;

	switch (type) {
	case VMCS_INTR_T_HWINTR:
	case VMCS_INTR_T_NMI:
	case VMCS_INTR_T_HWEXCEPTION:
	case VMCS_INTR_T_SWINTR:
	case VMCS_INTR_T_PRIV_SWEXCEPTION:
	case VMCS_INTR_T_SWEXCEPTION:
		break;
	default:
		panic("unexpected event type 0x%03x", type);
	}

	uint64_t intinfo = VM_INTINFO_VALID | type | vec;
	if (info & VMCS_IDT_VEC_ERRCODE_VALID) {
		intinfo |= (uint64_t)errcode << 32;
	}

	return (intinfo);
}

CTASSERT(VMCS_INTR_DEL_ERRCODE == VMCS_IDT_VEC_ERRCODE_VALID);
CTASSERT(VMCS_INTR_VALID == VMCS_IDT_VEC_VALID);

/*
 * Store VMX-specific event injection info for later handling.  This depends on
 * the bhyve-internal event definitions matching those in the VMCS, as ensured
 * by the vmx_idtvec_to_intinfo() and the related CTASSERTs.
 */
static void
vmx_stash_intinfo(struct vmx *vmx, int vcpu)
{
	uint64_t info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	if ((info & VMCS_INTR_VALID) != 0) {
		uint32_t errcode = 0;

		if ((info & VMCS_INTR_DEL_ERRCODE) != 0) {
			errcode = vmcs_read(VMCS_ENTRY_EXCEPTION_ERROR);
		}

		VERIFY0(vm_exit_intinfo(vmx->vm, vcpu,
		    vmx_idtvec_to_intinfo(info, errcode)));

		vmcs_write(VMCS_ENTRY_INTR_INFO, 0);
		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, 0);
	}
}

static void
vmx_inject_intinfo(uint64_t info)
{
	ASSERT(VM_INTINFO_PENDING(info));
	ASSERT0(info & VM_INTINFO_MASK_RSVD);

	/*
	 * The bhyve format matches that of the VMCS, which is ensured by the
	 * CTASSERTs above.
	 */
	uint32_t inject = info;
	switch (VM_INTINFO_VECTOR(info)) {
	case IDT_BP:
	case IDT_OF:
		/*
		 * VT-x requires #BP and #OF to be injected as software
		 * exceptions.
		 */
		inject &= ~VMCS_INTR_T_MASK;
		inject |= VMCS_INTR_T_SWEXCEPTION;
		break;
	default:
		break;
	}

	if (VM_INTINFO_HAS_ERRCODE(info)) {
		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
		    VM_INTINFO_ERRCODE(info));
	}
	vmcs_write(VMCS_ENTRY_INTR_INFO, inject);
}

#define NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
	VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
#define HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
	VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)

static void
vmx_inject_nmi(struct vmx *vmx, int vcpu)
{
	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/*
	 * Inject the virtual NMI.  The vector must be the NMI IDT entry
	 * or the VMCS entry check will fail.
	 */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);

	/* Clear the request */
	vm_nmi_clear(vmx->vm, vcpu);
}

/*
 * Inject exceptions, NMIs, and ExtINTs.
 *
 * The logic behind these is complicated and may involve mutex contention, so
 * the injection is performed without the protection of host CPU interrupts
 * being disabled.  This means a racing notification could be "lost",
 * necessitating a later call to vmx_inject_recheck() to close that window
 * of opportunity.
 */
static enum event_inject_state
vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
{
	uint64_t entryinfo;
	uint32_t gi, info;
	int vector;
	enum event_inject_state state;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
	state = EIS_CAN_INJECT;

	/* Clear any interrupt blocking if the guest %rip has changed */
	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
		gi &= ~HWINTR_BLOCKING;
		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
	}

	/*
	 * It could be that an interrupt is already pending for injection from
	 * the VMCS.  This would be the case if the vCPU exited for conditions
	 * such as an AST before a vm-entry delivered the injection.
	 */
	if ((info & VMCS_INTR_VALID) != 0) {
		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
	}

	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
		vmx_inject_intinfo(entryinfo);
		state = EIS_EV_INJECTED;
	}

	if (vm_nmi_pending(vmx->vm, vcpu)) {
		/*
		 * If there are no conditions blocking NMI injection then inject
		 * it directly here otherwise enable "NMI window exiting" to
		 * inject it as soon as we can.
		 *
		 * According to the Intel manual, some CPUs do not allow NMI
		 * injection when STI_BLOCKING is active.  That check is
		 * enforced here, regardless of CPU capability.  If running on a
		 * CPU without such a restriction it will immediately exit and
		 * the NMI will be injected in the "NMI window exiting" handler.
		 */
		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
			if (state == EIS_CAN_INJECT) {
				vmx_inject_nmi(vmx, vcpu);
				state = EIS_EV_INJECTED;
			} else {
				return (state | EIS_REQ_EXIT);
			}
		} else {
			vmx_set_nmi_window_exiting(vmx, vcpu);
		}
	}

	if (vm_extint_pending(vmx->vm, vcpu)) {
		if (state != EIS_CAN_INJECT) {
			return (state | EIS_REQ_EXIT);
		}
		if ((gi & HWINTR_BLOCKING) != 0 ||
		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
			return (EIS_GI_BLOCK);
		}

		/* Ask the legacy pic for a vector to inject */
		vatpic_pending_intr(vmx->vm, &vector);

		/*
		 * From the Intel SDM, Volume 3, Section "Maskable
		 * Hardware Interrupts":
		 * - maskable interrupt vectors [0,255] can be delivered
		 *   through the INTR pin.
		 */
		KASSERT(vector >= 0 && vector <= 255,
		    ("invalid vector %d from INTR", vector));

		/* Inject the interrupt */
		vmcs_write(VMCS_ENTRY_INTR_INFO,
		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

		vm_extint_clear(vmx->vm, vcpu);
		vatpic_intr_accepted(vmx->vm, vector);
		state = EIS_EV_INJECTED;
	}

	return (state);
}

/*
 * Inject any interrupts pending on the vLAPIC.
 *
 * This is done with host CPU interrupts disabled so notification IPIs, either
 * from the standard vCPU notification or APICv posted interrupts, will be
 * queued on the host APIC and recognized when entering VMX context.
 */
static enum event_inject_state
vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
{
	int vector;

	if (!vlapic_pending_intr(vlapic, &vector)) {
		return (EIS_CAN_INJECT);
	}

	/*
	 * From the Intel SDM, Volume 3, Section "Maskable
	 * Hardware Interrupts":
	 * - maskable interrupt vectors [16,255] can be delivered
	 *   through the local APIC.
	 */
	KASSERT(vector >= 16 && vector <= 255,
	    ("invalid vector %d from local APIC", vector));

	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
		uint16_t status_new = (status_old & 0xff00) | vector;

		/*
		 * The APICv state will have been synced into the vLAPIC
		 * as part of vlapic_pending_intr().  Prepare the VMCS
		 * for the to-be-injected pending interrupt.
		 */
		if (status_new > status_old) {
			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
		}

		/*
		 * Ensure VMCS state regarding EOI traps is kept in sync
		 * with the TMRs in the vlapic.
		 */
		vmx_apicv_sync_tmr(vlapic);

		/*
		 * The rest of the injection process for injecting the
		 * interrupt(s) is handled by APICv.  It does not preclude other
		 * event injection from occurring.
		 */
		return (EIS_CAN_INJECT);
	}

	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);

	/* Does guest interruptibility block injection? */
	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
		return (EIS_GI_BLOCK);
	}

	/* Inject the interrupt */
	vmcs_write(VMCS_ENTRY_INTR_INFO,
	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);

	/* Update the Local APIC ISR */
	vlapic_intr_accepted(vlapic, vector);

	return (EIS_EV_INJECTED);
}

/*
 * Re-check for events to be injected.
 *
 * Once host CPU interrupts are disabled, check for the presence of any events
 * which require injection processing.  If an exit is required upon injection,
 * or once the guest becomes interruptible, that will be configured too.
 */
static bool
vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
{
	if (state == EIS_CAN_INJECT) {
		if (vm_nmi_pending(vmx->vm, vcpu) &&
		    !vmx_nmi_window_exiting(vmx, vcpu)) {
			/* queued NMI not blocked by NMI-window-exiting */
			return (true);
		}
		if (vm_extint_pending(vmx->vm, vcpu)) {
			/* queued ExtINT not blocked by existing injection */
			return (true);
		}
	} else {
		if ((state & EIS_REQ_EXIT) != 0) {
			/*
			 * Use a self-IPI to force an immediate exit after
			 * event injection has occurred.
			 */
			poke_cpu(CPU->cpu_id);
		} else {
			/*
			 * If any event is being injected, an exit immediately
			 * upon becoming interruptible again will allow pending
			 * or newly queued events to be injected in a timely
			 * manner.
			 */
			vmx_set_int_window_exiting(vmx, vcpu);
		}
	}
	return (false);
}

/*
 * If the Virtual NMIs execution control is '1' then the logical processor
 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
 * the VMCS.  An IRET instruction in VMX non-root operation will remove any
 * virtual-NMI blocking.
 *
 * This unblocking occurs even if the IRET causes a fault.  In this case the
 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
 */
static void
vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static void
vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
{
	uint32_t gi;

	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
	    ("NMI blocking is not in effect %x", gi));
}

static int
vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	struct vmxctx *vmxctx;
	uint64_t xcrval;
	const struct xsave_limits *limits;

	vmxctx = &vmx->ctx[vcpu];
	limits = vmm_get_xsave_limits();

	/*
	 * Note that the processor raises a GP# fault on its own if
	 * xsetbv is executed for CPL != 0, so we do not have to
	 * emulate that fault here.
	 */

	/* Only xcr0 is supported. */
	if (vmxctx->guest_rcx != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * We only handle xcr0 if both the host and guest have XSAVE enabled.
	 */
	if (!limits->xsave_enabled ||
	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
		vm_inject_ud(vmx->vm, vcpu);
		return (HANDLED);
	}

	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
	if ((xcrval & ~limits->xcr0_allowed) != 0) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	if (!(xcrval & XFEATURE_ENABLED_X87)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/* AVX (YMM_Hi128) requires SSE. */
	if (xcrval & XFEATURE_ENABLED_AVX &&
	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
	 * ZMM_Hi256, and Hi16_ZMM.
	 */
	if (xcrval & XFEATURE_AVX512 &&
	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * Intel MPX requires both bound register state flags to be
	 * set.
	 */
	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
		vm_inject_gp(vmx->vm, vcpu);
		return (HANDLED);
	}

	/*
	 * This runs "inside" vmrun() with the guest's FPU state, so
	 * modifying xcr0 directly modifies the guest's xcr0, not the
	 * host's.
	 */
	load_xcr(0, xcrval);
	return (HANDLED);
}

/*
 * Fetch a guest general-purpose register by its instruction-encoding number
 * (0 = %rax ... 15 = %r15).  %rsp is not kept in the vmxctx and is instead
 * read from the VMCS.
 */
static uint64_t
vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
{
	const struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		return (vmxctx->guest_rax);
	case 1:
		return (vmxctx->guest_rcx);
	case 2:
		return (vmxctx->guest_rdx);
	case 3:
		return (vmxctx->guest_rbx);
	case 4:
		return (vmcs_read(VMCS_GUEST_RSP));
	case 5:
		return (vmxctx->guest_rbp);
	case 6:
		return (vmxctx->guest_rsi);
	case 7:
		return (vmxctx->guest_rdi);
	case 8:
		return (vmxctx->guest_r8);
	case 9:
		return (vmxctx->guest_r9);
	case 10:
		return (vmxctx->guest_r10);
	case 11:
		return (vmxctx->guest_r11);
	case 12:
		return (vmxctx->guest_r12);
	case 13:
		return (vmxctx->guest_r13);
	case 14:
		return (vmxctx->guest_r14);
	case 15:
		return (vmxctx->guest_r15);
	default:
		panic("invalid vmx register %d", ident);
	}
}

/*
 * Store a guest general-purpose register by its instruction-encoding number,
 * mirroring vmx_get_guest_reg() above.
 */
static void
vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
{
	struct vmxctx *vmxctx;

	vmxctx = &vmx->ctx[vcpu];

	switch (ident) {
	case 0:
		vmxctx->guest_rax = regval;
		break;
	case 1:
		vmxctx->guest_rcx = regval;
		break;
	case 2:
		vmxctx->guest_rdx = regval;
		break;
	case 3:
		vmxctx->guest_rbx = regval;
		break;
	case 4:
		vmcs_write(VMCS_GUEST_RSP, regval);
		break;
	case 5:
		vmxctx->guest_rbp = regval;
		break;
	case 6:
		vmxctx->guest_rsi = regval;
		break;
	case 7:
		vmxctx->guest_rdi = regval;
		break;
	case 8:
		vmxctx->guest_r8 = regval;
		break;
	case 9:
		vmxctx->guest_r9 = regval;
		break;
	case 10:
		vmxctx->guest_r10 = regval;
		break;
	case 11:
		vmxctx->guest_r11 = regval;
		break;
	case 12:
		vmxctx->guest_r12 = regval;
		break;
	case 13:
		vmxctx->guest_r13 = regval;
		break;
	case 14:
		vmxctx->guest_r14 = regval;
		break;
	case 15:
		vmxctx->guest_r15 = regval;
		break;
	default:
		panic("invalid vmx register %d", ident);
	}
}

static void
vmx_sync_efer_state(struct vmx *vmx, int vcpu, uint64_t efer)
{
	uint64_t ctrl;

	/*
	 * If the "load EFER" VM-entry control is 1 (which we require) then the
	 * value of EFER.LMA must be identical to "IA-32e mode guest" bit in the
	 * VM-entry control.
	 */
	ctrl = vmcs_read(VMCS_ENTRY_CTLS);
	if ((efer & EFER_LMA) != 0) {
		ctrl |= VM_ENTRY_GUEST_LMA;
	} else {
		ctrl &= ~VM_ENTRY_GUEST_LMA;
	}
	vmcs_write(VMCS_ENTRY_CTLS, ctrl);
}

static int
vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr0 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR0_SHADOW, regval);

	crval = regval | cr0_ones_mask;
	crval &= ~cr0_zeros_mask;

	const uint64_t old = vmcs_read(VMCS_GUEST_CR0);
	const uint64_t diff = crval ^ old;
	/* Flush the TLB if the paging or write-protect bits are changing */
	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
		vmx_invvpid(vmx, vcpu, 1);
	}

	vmcs_write(VMCS_GUEST_CR0, crval);

	if (regval & CR0_PG) {
		uint64_t efer;

		/* Keep EFER.LMA properly updated if paging is enabled */
		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
		if (efer & EFER_LME) {
			efer |= EFER_LMA;
			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
			vmx_sync_efer_state(vmx, vcpu, efer);
		}
	}

	return (HANDLED);
}

static int
vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	uint64_t crval, regval;

	/* We only handle mov to %cr4 at this time */
	if ((exitqual & 0xf0) != 0x00)
		return (UNHANDLED);

	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);

	vmcs_write(VMCS_CR4_SHADOW, regval);

	crval = regval | cr4_ones_mask;
	crval &= ~cr4_zeros_mask;
	vmcs_write(VMCS_GUEST_CR4, crval);

	return (HANDLED);
}

static int
vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
{
	struct vlapic *vlapic;
	uint64_t cr8;
	int regnum;

	/* We only handle mov %cr8 to/from a register at this time. */
	if ((exitqual & 0xe0) != 0x00) {
		return (UNHANDLED);
	}

	vlapic = vm_lapic(vmx->vm, vcpu);
	regnum = (exitqual >> 8) & 0xf;
	if (exitqual & 0x10) {
		cr8 = vlapic_get_cr8(vlapic);
		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
	} else {
		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
		vlapic_set_cr8(vlapic, cr8);
	}

	return (HANDLED);
}

/*
 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
 */
static int
vmx_cpl(void)
{
	uint32_t ssar;

	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
	return ((ssar >> 5) & 0x3);
}

static enum vm_cpu_mode
vmx_cpu_mode(void)
{
	uint32_t csar;

	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		if (csar & 0x2000)
			return (CPU_MODE_64BIT);	/* CS.L = 1 */
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}

static enum vm_paging_mode
vmx_paging_mode(void)
{

	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
		return (PAGING_MODE_FLAT);
	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
		return (PAGING_MODE_32);
	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_read(VMCS_GUEST_CR3);
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();
	paging->paging_mode = vmx_paging_mode();
}

static void
vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
    uint64_t gla)
{
	struct vm_guest_paging paging;
	uint32_t csar;

	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
	vmexit->inst_length = 0;
	vmexit->u.mmio_emul.gpa = gpa;
	vmexit->u.mmio_emul.gla = gla;
	vmx_paging_info(&paging);

	switch (paging.cpu_mode) {
	case CPU_MODE_REAL:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.mmio_emul.cs_base = 0;
		vmexit->u.mmio_emul.cs_d = 0;
		break;
	}

	vie_init_mmio(vie, NULL, 0, &paging, gpa);
}

static void
vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
    uint32_t eax)
{
	struct vm_guest_paging paging;
	struct vm_inout *inout;

	inout = &vmexit->u.inout;

	inout->bytes = (qual & 0x7) + 1;
	inout->flags = 0;
	inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
	inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
	inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
	inout->port = (uint16_t)(qual >> 16);
	inout->eax = eax;
	if (inout->flags & INOUT_STR) {
		uint64_t inst_info;

		inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);

		/*
		 * According to the SDM, bits 9:7 encode the address size of the
		 * ins/outs operation, but only values 0/1/2 are expected,
		 * corresponding to 16/32/64 bit sizes.
		 */
		inout->addrsize = 2 << BITX(inst_info, 9, 7);
		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
		    inout->addrsize == 8);

		if (inout->flags & INOUT_IN) {
			/*
			 * The bits describing the segment in INSTRUCTION_INFO
			 * are not defined for ins, leaving it to system
			 * software to assume %es (encoded as 0)
			 */
			inout->segment = 0;
		} else {
			/*
			 * Bits 15-17 encode the segment for OUTS.
			 * This value follows the standard x86 segment order.
			 */
			inout->segment = (inst_info >> 15) & 0x7;
		}
	}

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmx_paging_info(&paging);
	vie_init_inout(vie, inout, vmexit->inst_length, &paging);

	/* The in/out emulation will handle advancing %rip */
	vmexit->inst_length = 0;
}

static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
		fault_type = PROT_WRITE;
	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
		fault_type = PROT_EXEC;
	else
		fault_type = PROT_READ;

	return (fault_type);
}

static bool
ept_emulation_fault(uint64_t ept_qual)
{
	int read, write;

	/* EPT fault on an instruction fetch doesn't make sense here */
	if (ept_qual & EPT_VIOLATION_INST_FETCH)
		return (false);

	/* EPT fault must be a read fault or a write fault */
	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
	if ((read | write) == 0)
		return (false);

	/*
	 * The EPT violation must have been caused by accessing a
	 * guest-physical address that is a translation of a guest-linear
	 * address.
	 */
	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
		return (false);
	}

	return (true);
}

static __inline int
apic_access_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
}

static __inline int
x2apic_virtualization(struct vmx *vmx, int vcpuid)
{
	uint32_t proc_ctls2;

	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
}

static int
vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
    uint64_t qual)
{
	const uint_t offset = APIC_WRITE_OFFSET(qual);

	if (!apic_access_virtualization(vmx, vcpuid)) {
		/*
		 * In general there should not be any APIC write VM-exits
		 * unless APIC-access virtualization is enabled.
		 *
		 * However self-IPI virtualization can legitimately trigger
		 * an APIC-write VM-exit so treat it specially.
1989 */ 1990 if (x2apic_virtualization(vmx, vcpuid) && 1991 offset == APIC_OFFSET_SELF_IPI) { 1992 const uint32_t *apic_regs = 1993 (uint32_t *)(vlapic->apic_page); 1994 const uint32_t vector = 1995 apic_regs[APIC_OFFSET_SELF_IPI / 4]; 1996 1997 vlapic_self_ipi_handler(vlapic, vector); 1998 return (HANDLED); 1999 } else 2000 return (UNHANDLED); 2001 } 2002 2003 switch (offset) { 2004 case APIC_OFFSET_ID: 2005 vlapic_id_write_handler(vlapic); 2006 break; 2007 case APIC_OFFSET_LDR: 2008 vlapic_ldr_write_handler(vlapic); 2009 break; 2010 case APIC_OFFSET_DFR: 2011 vlapic_dfr_write_handler(vlapic); 2012 break; 2013 case APIC_OFFSET_SVR: 2014 vlapic_svr_write_handler(vlapic); 2015 break; 2016 case APIC_OFFSET_ESR: 2017 vlapic_esr_write_handler(vlapic); 2018 break; 2019 case APIC_OFFSET_ICR_LOW: 2020 vlapic_icrlo_write_handler(vlapic); 2021 break; 2022 case APIC_OFFSET_CMCI_LVT: 2023 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT: 2024 vlapic_lvt_write_handler(vlapic, offset); 2025 break; 2026 case APIC_OFFSET_TIMER_ICR: 2027 vlapic_icrtmr_write_handler(vlapic); 2028 break; 2029 case APIC_OFFSET_TIMER_DCR: 2030 vlapic_dcr_write_handler(vlapic); 2031 break; 2032 default: 2033 return (UNHANDLED); 2034 } 2035 return (HANDLED); 2036 } 2037 2038 static bool 2039 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa) 2040 { 2041 2042 if (apic_access_virtualization(vmx, vcpuid) && 2043 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE)) 2044 return (true); 2045 else 2046 return (false); 2047 } 2048 2049 static int 2050 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit) 2051 { 2052 uint64_t qual; 2053 int access_type, offset, allowed; 2054 struct vie *vie; 2055 2056 if (!apic_access_virtualization(vmx, vcpuid)) 2057 return (UNHANDLED); 2058 2059 qual = vmexit->u.vmx.exit_qualification; 2060 access_type = APIC_ACCESS_TYPE(qual); 2061 offset = APIC_ACCESS_OFFSET(qual); 2062 2063 allowed = 0; 2064 if (access_type == 0) { 2065 /* 2066 * Read data access to the following registers is expected. 2067 */ 2068 switch (offset) { 2069 case APIC_OFFSET_APR: 2070 case APIC_OFFSET_PPR: 2071 case APIC_OFFSET_RRR: 2072 case APIC_OFFSET_CMCI_LVT: 2073 case APIC_OFFSET_TIMER_CCR: 2074 allowed = 1; 2075 break; 2076 default: 2077 break; 2078 } 2079 } else if (access_type == 1) { 2080 /* 2081 * Write data access to the following registers is expected. 2082 */ 2083 switch (offset) { 2084 case APIC_OFFSET_VER: 2085 case APIC_OFFSET_APR: 2086 case APIC_OFFSET_PPR: 2087 case APIC_OFFSET_RRR: 2088 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7: 2089 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7: 2090 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7: 2091 case APIC_OFFSET_CMCI_LVT: 2092 case APIC_OFFSET_TIMER_CCR: 2093 allowed = 1; 2094 break; 2095 default: 2096 break; 2097 } 2098 } 2099 2100 if (allowed) { 2101 vie = vm_vie_ctx(vmx->vm, vcpuid); 2102 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset, 2103 VIE_INVALID_GLA); 2104 } 2105 2106 /* 2107 * Regardless of whether the APIC-access is allowed this handler 2108 * always returns UNHANDLED: 2109 * - if the access is allowed then it is handled by emulating the 2110 * instruction that caused the VM-exit (outside the critical section) 2111 * - if the access is not allowed then it will be converted to an 2112 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland. 
2113 */ 2114 return (UNHANDLED); 2115 } 2116 2117 static enum task_switch_reason 2118 vmx_task_switch_reason(uint64_t qual) 2119 { 2120 int reason; 2121 2122 reason = (qual >> 30) & 0x3; 2123 switch (reason) { 2124 case 0: 2125 return (TSR_CALL); 2126 case 1: 2127 return (TSR_IRET); 2128 case 2: 2129 return (TSR_JMP); 2130 case 3: 2131 return (TSR_IDT_GATE); 2132 default: 2133 panic("%s: invalid reason %d", __func__, reason); 2134 } 2135 } 2136 2137 static int 2138 vmx_handle_msr(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit, 2139 bool is_wrmsr) 2140 { 2141 struct vmxctx *vmxctx = &vmx->ctx[vcpuid]; 2142 const uint32_t ecx = vmxctx->guest_rcx; 2143 vm_msr_result_t res; 2144 uint64_t val = 0; 2145 2146 if (is_wrmsr) { 2147 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1); 2148 val = vmxctx->guest_rdx << 32 | (uint32_t)vmxctx->guest_rax; 2149 2150 if (vlapic_owned_msr(ecx)) { 2151 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2152 2153 res = vlapic_wrmsr(vlapic, ecx, val); 2154 } else { 2155 res = vmx_wrmsr(vmx, vcpuid, ecx, val); 2156 } 2157 } else { 2158 vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1); 2159 2160 if (vlapic_owned_msr(ecx)) { 2161 struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid); 2162 2163 res = vlapic_rdmsr(vlapic, ecx, &val); 2164 } else { 2165 res = vmx_rdmsr(vmx, vcpuid, ecx, &val); 2166 } 2167 } 2168 2169 switch (res) { 2170 case VMR_OK: 2171 /* Store rdmsr result in the appropriate registers */ 2172 if (!is_wrmsr) { 2173 vmxctx->guest_rax = (uint32_t)val; 2174 vmxctx->guest_rdx = val >> 32; 2175 } 2176 return (HANDLED); 2177 case VMR_GP: 2178 vm_inject_gp(vmx->vm, vcpuid); 2179 return (HANDLED); 2180 case VMR_UNHANLDED: 2181 vmexit->exitcode = is_wrmsr ? 2182 VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR; 2183 vmexit->u.msr.code = ecx; 2184 vmexit->u.msr.wval = val; 2185 return (UNHANDLED); 2186 default: 2187 panic("unexpected msr result %u\n", res); 2188 } 2189 } 2190 2191 static int 2192 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit) 2193 { 2194 int error, errcode, errcode_valid, handled; 2195 struct vmxctx *vmxctx; 2196 struct vie *vie; 2197 struct vlapic *vlapic; 2198 struct vm_task_switch *ts; 2199 uint32_t idtvec_info, intr_info; 2200 uint32_t intr_type, intr_vec, reason; 2201 uint64_t qual, gpa; 2202 2203 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0); 2204 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0); 2205 2206 handled = UNHANDLED; 2207 vmxctx = &vmx->ctx[vcpu]; 2208 2209 qual = vmexit->u.vmx.exit_qualification; 2210 reason = vmexit->u.vmx.exit_reason; 2211 vmexit->exitcode = VM_EXITCODE_BOGUS; 2212 2213 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1); 2214 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit); 2215 2216 /* 2217 * VM-entry failures during or after loading guest state. 2218 * 2219 * These VM-exits are uncommon but must be handled specially 2220 * as most VM-exit fields are not populated as usual. 2221 */ 2222 if (reason == EXIT_REASON_MCE_DURING_ENTRY) { 2223 vmm_call_trap(T_MCE); 2224 return (1); 2225 } 2226 2227 /* 2228 * VM exits that can be triggered during event delivery need to 2229 * be handled specially by re-injecting the event if the IDT 2230 * vectoring information field's valid bit is set. 2231 * 2232 * See "Information for VM Exits During Event Delivery" in Intel SDM 2233 * for details. 
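 *
 * The handling below stashes the vectoring information (converted via
 * vmx_idtvec_to_intinfo()) as exit intinfo so the interrupted event can
 * be re-injected on a subsequent VM-entry.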
2234 */ 2235 idtvec_info = vmcs_read(VMCS_IDT_VECTORING_INFO); 2236 if (idtvec_info & VMCS_IDT_VEC_VALID) { 2237 uint32_t errcode = 0; 2238 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2239 errcode = vmcs_read(VMCS_IDT_VECTORING_ERROR); 2240 } 2241 2242 /* Record exit intinfo */ 2243 VERIFY0(vm_exit_intinfo(vmx->vm, vcpu, 2244 vmx_idtvec_to_intinfo(idtvec_info, errcode))); 2245 2246 /* 2247 * If 'virtual NMIs' are being used and the VM-exit 2248 * happened while injecting an NMI during the previous 2249 * VM-entry, then clear "blocking by NMI" in the 2250 * Guest Interruptibility-State so the NMI can be 2251 * reinjected on the subsequent VM-entry. 2252 * 2253 * However, if the NMI was being delivered through a task 2254 * gate, then the new task must start execution with NMIs 2255 * blocked so don't clear NMI blocking in this case. 2256 */ 2257 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2258 if (intr_type == VMCS_INTR_T_NMI) { 2259 if (reason != EXIT_REASON_TASK_SWITCH) 2260 vmx_clear_nmi_blocking(vmx, vcpu); 2261 else 2262 vmx_assert_nmi_blocking(vmx, vcpu); 2263 } 2264 2265 /* 2266 * Update VM-entry instruction length if the event being 2267 * delivered was a software interrupt or software exception. 2268 */ 2269 if (intr_type == VMCS_INTR_T_SWINTR || 2270 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || 2271 intr_type == VMCS_INTR_T_SWEXCEPTION) { 2272 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2273 } 2274 } 2275 2276 switch (reason) { 2277 case EXIT_REASON_TRIPLE_FAULT: 2278 (void) vm_suspend(vmx->vm, VM_SUSPEND_TRIPLEFAULT); 2279 handled = HANDLED; 2280 break; 2281 case EXIT_REASON_TASK_SWITCH: 2282 ts = &vmexit->u.task_switch; 2283 ts->tsssel = qual & 0xffff; 2284 ts->reason = vmx_task_switch_reason(qual); 2285 ts->ext = 0; 2286 ts->errcode_valid = 0; 2287 vmx_paging_info(&ts->paging); 2288 /* 2289 * If the task switch was due to a CALL, JMP, IRET, software 2290 * interrupt (INT n) or software exception (INT3, INTO), 2291 * then the saved %rip references the instruction that caused 2292 * the task switch. The instruction length field in the VMCS 2293 * is valid in this case. 2294 * 2295 * In all other cases (e.g., NMI, hardware exception) the 2296 * saved %rip is one that would have been saved in the old TSS 2297 * had the task switch completed normally so the instruction 2298 * length field is not needed in this case and is explicitly 2299 * set to 0. 
2300 */ 2301 if (ts->reason == TSR_IDT_GATE) { 2302 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID, 2303 ("invalid idtvec_info %x for IDT task switch", 2304 idtvec_info)); 2305 intr_type = idtvec_info & VMCS_INTR_T_MASK; 2306 if (intr_type != VMCS_INTR_T_SWINTR && 2307 intr_type != VMCS_INTR_T_SWEXCEPTION && 2308 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) { 2309 /* Task switch triggered by external event */ 2310 ts->ext = 1; 2311 vmexit->inst_length = 0; 2312 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) { 2313 ts->errcode_valid = 1; 2314 ts->errcode = 2315 vmcs_read(VMCS_IDT_VECTORING_ERROR); 2316 } 2317 } 2318 } 2319 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH; 2320 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts); 2321 break; 2322 case EXIT_REASON_CR_ACCESS: 2323 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1); 2324 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual); 2325 switch (qual & 0xf) { 2326 case 0: 2327 handled = vmx_emulate_cr0_access(vmx, vcpu, qual); 2328 break; 2329 case 4: 2330 handled = vmx_emulate_cr4_access(vmx, vcpu, qual); 2331 break; 2332 case 8: 2333 handled = vmx_emulate_cr8_access(vmx, vcpu, qual); 2334 break; 2335 } 2336 break; 2337 case EXIT_REASON_RDMSR: 2338 case EXIT_REASON_WRMSR: 2339 handled = vmx_handle_msr(vmx, vcpu, vmexit, 2340 reason == EXIT_REASON_WRMSR); 2341 break; 2342 case EXIT_REASON_HLT: 2343 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1); 2344 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit); 2345 vmexit->exitcode = VM_EXITCODE_HLT; 2346 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS); 2347 break; 2348 case EXIT_REASON_MTF: 2349 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1); 2350 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit); 2351 vmexit->exitcode = VM_EXITCODE_MTRAP; 2352 vmexit->inst_length = 0; 2353 break; 2354 case EXIT_REASON_PAUSE: 2355 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1); 2356 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit); 2357 vmexit->exitcode = VM_EXITCODE_PAUSE; 2358 break; 2359 case EXIT_REASON_INTR_WINDOW: 2360 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1); 2361 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit); 2362 ASSERT(vmx_int_window_exiting(vmx, vcpu)); 2363 vmx_clear_int_window_exiting(vmx, vcpu); 2364 return (1); 2365 case EXIT_REASON_EXT_INTR: 2366 /* 2367 * External interrupts serve only to cause VM exits and allow 2368 * the host interrupt handler to run. 2369 * 2370 * If this external interrupt triggers a virtual interrupt 2371 * to a VM, then that state will be recorded by the 2372 * host interrupt handler in the VM's softc. We will inject 2373 * this virtual interrupt during the subsequent VM enter. 2374 */ 2375 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2376 SDT_PROBE4(vmm, vmx, exit, interrupt, 2377 vmx, vcpu, vmexit, intr_info); 2378 2379 /* 2380 * XXX: Ignore this exit if VMCS_INTR_VALID is not set. 2381 * This appears to be a bug in VMware Fusion? 2382 */ 2383 if (!(intr_info & VMCS_INTR_VALID)) 2384 return (1); 2385 KASSERT((intr_info & VMCS_INTR_VALID) != 0 && 2386 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR, 2387 ("VM exit interruption info invalid: %x", intr_info)); 2388 vmx_trigger_hostintr(intr_info & 0xff); 2389 2390 /* 2391 * This is special. We want to treat this as an 'handled' 2392 * VM-exit but not increment the instruction pointer. 
2393 */ 2394 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1); 2395 return (1); 2396 case EXIT_REASON_NMI_WINDOW: 2397 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit); 2398 /* Exit to allow the pending virtual NMI to be injected */ 2399 if (vm_nmi_pending(vmx->vm, vcpu)) 2400 vmx_inject_nmi(vmx, vcpu); 2401 ASSERT(vmx_nmi_window_exiting(vmx, vcpu)); 2402 vmx_clear_nmi_window_exiting(vmx, vcpu); 2403 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1); 2404 return (1); 2405 case EXIT_REASON_INOUT: 2406 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1); 2407 vie = vm_vie_ctx(vmx->vm, vcpu); 2408 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax); 2409 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit); 2410 break; 2411 case EXIT_REASON_CPUID: 2412 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1); 2413 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit); 2414 vcpu_emulate_cpuid(vmx->vm, vcpu, 2415 (uint64_t *)&vmxctx->guest_rax, 2416 (uint64_t *)&vmxctx->guest_rbx, 2417 (uint64_t *)&vmxctx->guest_rcx, 2418 (uint64_t *)&vmxctx->guest_rdx); 2419 handled = HANDLED; 2420 break; 2421 case EXIT_REASON_EXCEPTION: 2422 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1); 2423 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2424 KASSERT((intr_info & VMCS_INTR_VALID) != 0, 2425 ("VM exit interruption info invalid: %x", intr_info)); 2426 2427 intr_vec = intr_info & 0xff; 2428 intr_type = intr_info & VMCS_INTR_T_MASK; 2429 2430 /* 2431 * If Virtual NMIs control is 1 and the VM-exit is due to a 2432 * fault encountered during the execution of IRET then we must 2433 * restore the state of "virtual-NMI blocking" before resuming 2434 * the guest. 2435 * 2436 * See "Resuming Guest Software after Handling an Exception". 2437 * See "Information for VM Exits Due to Vectored Events". 2438 */ 2439 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2440 (intr_vec != IDT_DF) && 2441 (intr_info & EXIT_QUAL_NMIUDTI) != 0) 2442 vmx_restore_nmi_blocking(vmx, vcpu); 2443 2444 /* 2445 * The NMI has already been handled in vmx_exit_handle_nmi(). 2446 */ 2447 if (intr_type == VMCS_INTR_T_NMI) 2448 return (1); 2449 2450 /* 2451 * Call the machine check handler by hand. Also don't reflect 2452 * the machine check back into the guest. 2453 */ 2454 if (intr_vec == IDT_MC) { 2455 vmm_call_trap(T_MCE); 2456 return (1); 2457 } 2458 2459 /* 2460 * If the hypervisor has requested user exits for 2461 * debug exceptions, bounce them out to userland. 2462 */ 2463 if (intr_type == VMCS_INTR_T_SWEXCEPTION && 2464 intr_vec == IDT_BP && 2465 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) { 2466 vmexit->exitcode = VM_EXITCODE_BPT; 2467 vmexit->u.bpt.inst_length = vmexit->inst_length; 2468 vmexit->inst_length = 0; 2469 break; 2470 } 2471 2472 if (intr_vec == IDT_PF) { 2473 vmxctx->guest_cr2 = qual; 2474 } 2475 2476 /* 2477 * Software exceptions exhibit trap-like behavior. This in 2478 * turn requires populating the VM-entry instruction length 2479 * so that the %rip in the trap frame is past the INT3/INTO 2480 * instruction. 
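 *
 * For example, a #BP raised by the one-byte INT3 instruction is
 * reflected with a VM-entry instruction length of 1, so the return %rip
 * pushed in the guest trap frame points at the byte following the INT3,
 * matching bare-metal behavior.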
2481 */ 2482 if (intr_type == VMCS_INTR_T_SWEXCEPTION) 2483 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length); 2484 2485 /* Reflect all other exceptions back into the guest */ 2486 errcode_valid = errcode = 0; 2487 if (intr_info & VMCS_INTR_DEL_ERRCODE) { 2488 errcode_valid = 1; 2489 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE); 2490 } 2491 SDT_PROBE5(vmm, vmx, exit, exception, 2492 vmx, vcpu, vmexit, intr_vec, errcode); 2493 error = vm_inject_exception(vmx->vm, vcpu, intr_vec, 2494 errcode_valid, errcode, 0); 2495 KASSERT(error == 0, ("%s: vm_inject_exception error %d", 2496 __func__, error)); 2497 return (1); 2498 2499 case EXIT_REASON_EPT_FAULT: 2500 /* 2501 * If 'gpa' lies within the address space allocated to 2502 * memory then this must be a nested page fault otherwise 2503 * this must be an instruction that accesses MMIO space. 2504 */ 2505 gpa = vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS); 2506 if (vm_mem_allocated(vmx->vm, vcpu, gpa) || 2507 apic_access_fault(vmx, vcpu, gpa)) { 2508 vmexit->exitcode = VM_EXITCODE_PAGING; 2509 vmexit->inst_length = 0; 2510 vmexit->u.paging.gpa = gpa; 2511 vmexit->u.paging.fault_type = ept_fault_type(qual); 2512 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1); 2513 SDT_PROBE5(vmm, vmx, exit, nestedfault, 2514 vmx, vcpu, vmexit, gpa, qual); 2515 } else if (ept_emulation_fault(qual)) { 2516 vie = vm_vie_ctx(vmx->vm, vcpu); 2517 vmexit_mmio_emul(vmexit, vie, gpa, 2518 vmcs_read(VMCS_GUEST_LINEAR_ADDRESS)); 2519 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1); 2520 SDT_PROBE4(vmm, vmx, exit, mmiofault, 2521 vmx, vcpu, vmexit, gpa); 2522 } 2523 /* 2524 * If Virtual NMIs control is 1 and the VM-exit is due to an 2525 * EPT fault during the execution of IRET then we must restore 2526 * the state of "virtual-NMI blocking" before resuming. 2527 * 2528 * See description of "NMI unblocking due to IRET" in 2529 * "Exit Qualification for EPT Violations". 2530 */ 2531 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 && 2532 (qual & EXIT_QUAL_NMIUDTI) != 0) 2533 vmx_restore_nmi_blocking(vmx, vcpu); 2534 break; 2535 case EXIT_REASON_VIRTUALIZED_EOI: 2536 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI; 2537 vmexit->u.ioapic_eoi.vector = qual & 0xFF; 2538 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit); 2539 vmexit->inst_length = 0; /* trap-like */ 2540 break; 2541 case EXIT_REASON_APIC_ACCESS: 2542 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit); 2543 handled = vmx_handle_apic_access(vmx, vcpu, vmexit); 2544 break; 2545 case EXIT_REASON_APIC_WRITE: 2546 /* 2547 * APIC-write VM exit is trap-like so the %rip is already 2548 * pointing to the next instruction. 
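 *
 * Clearing inst_length below also keeps the common handled-exit path
 * from advancing %rip a second time.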
2549 */ 2550 vmexit->inst_length = 0; 2551 vlapic = vm_lapic(vmx->vm, vcpu); 2552 SDT_PROBE4(vmm, vmx, exit, apicwrite, 2553 vmx, vcpu, vmexit, vlapic); 2554 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual); 2555 break; 2556 case EXIT_REASON_XSETBV: 2557 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit); 2558 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit); 2559 break; 2560 case EXIT_REASON_MONITOR: 2561 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit); 2562 vmexit->exitcode = VM_EXITCODE_MONITOR; 2563 break; 2564 case EXIT_REASON_MWAIT: 2565 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit); 2566 vmexit->exitcode = VM_EXITCODE_MWAIT; 2567 break; 2568 case EXIT_REASON_TPR: 2569 vlapic = vm_lapic(vmx->vm, vcpu); 2570 vlapic_sync_tpr(vlapic); 2571 vmexit->inst_length = 0; 2572 handled = HANDLED; 2573 break; 2574 case EXIT_REASON_VMCALL: 2575 case EXIT_REASON_VMCLEAR: 2576 case EXIT_REASON_VMLAUNCH: 2577 case EXIT_REASON_VMPTRLD: 2578 case EXIT_REASON_VMPTRST: 2579 case EXIT_REASON_VMREAD: 2580 case EXIT_REASON_VMRESUME: 2581 case EXIT_REASON_VMWRITE: 2582 case EXIT_REASON_VMXOFF: 2583 case EXIT_REASON_VMXON: 2584 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit); 2585 vmexit->exitcode = VM_EXITCODE_VMINSN; 2586 break; 2587 case EXIT_REASON_INVD: 2588 case EXIT_REASON_WBINVD: 2589 /* ignore exit */ 2590 handled = HANDLED; 2591 break; 2592 default: 2593 SDT_PROBE4(vmm, vmx, exit, unknown, 2594 vmx, vcpu, vmexit, reason); 2595 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1); 2596 break; 2597 } 2598 2599 if (handled) { 2600 /* 2601 * It is possible that control is returned to userland 2602 * even though we were able to handle the VM exit in the 2603 * kernel. 2604 * 2605 * In such a case we want to make sure that the userland 2606 * restarts guest execution at the instruction *after* 2607 * the one we just processed. Therefore we update the 2608 * guest rip in the VMCS and in 'vmexit'. 2609 */ 2610 vmexit->rip += vmexit->inst_length; 2611 vmexit->inst_length = 0; 2612 vmcs_write(VMCS_GUEST_RIP, vmexit->rip); 2613 } else { 2614 if (vmexit->exitcode == VM_EXITCODE_BOGUS) { 2615 /* 2616 * If this VM exit was not claimed by anybody then 2617 * treat it as a generic VMX exit. 2618 */ 2619 vmexit->exitcode = VM_EXITCODE_VMX; 2620 vmexit->u.vmx.status = VM_SUCCESS; 2621 vmexit->u.vmx.inst_type = 0; 2622 vmexit->u.vmx.inst_error = 0; 2623 } else { 2624 /* 2625 * The exitcode and collateral have been populated. 2626 * The VM exit will be processed further in userland. 
2627 */ 2628 } 2629 } 2630 2631 SDT_PROBE4(vmm, vmx, exit, return, 2632 vmx, vcpu, vmexit, handled); 2633 return (handled); 2634 } 2635 2636 static void 2637 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit) 2638 { 2639 2640 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS, 2641 ("vmx_exit_inst_error: invalid inst_fail_status %d", 2642 vmxctx->inst_fail_status)); 2643 2644 vmexit->inst_length = 0; 2645 vmexit->exitcode = VM_EXITCODE_VMX; 2646 vmexit->u.vmx.status = vmxctx->inst_fail_status; 2647 vmexit->u.vmx.inst_error = vmcs_read(VMCS_INSTRUCTION_ERROR); 2648 vmexit->u.vmx.exit_reason = ~0; 2649 vmexit->u.vmx.exit_qualification = ~0; 2650 2651 switch (rc) { 2652 case VMX_VMRESUME_ERROR: 2653 case VMX_VMLAUNCH_ERROR: 2654 case VMX_INVEPT_ERROR: 2655 case VMX_VMWRITE_ERROR: 2656 vmexit->u.vmx.inst_type = rc; 2657 break; 2658 default: 2659 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc); 2660 } 2661 } 2662 2663 /* 2664 * If the NMI-exiting VM execution control is set to '1' then an NMI in 2665 * non-root operation causes a VM-exit. NMI blocking is in effect so it is 2666 * sufficient to simply vector to the NMI handler via a software interrupt. 2667 * However, this must be done before maskable interrupts are enabled 2668 * otherwise the "iret" issued by an interrupt handler will incorrectly 2669 * clear NMI blocking. 2670 */ 2671 static __inline void 2672 vmx_exit_handle_possible_nmi(struct vm_exit *vmexit) 2673 { 2674 ASSERT(!interrupts_enabled()); 2675 2676 if (vmexit->u.vmx.exit_reason == EXIT_REASON_EXCEPTION) { 2677 uint32_t intr_info = vmcs_read(VMCS_EXIT_INTR_INFO); 2678 ASSERT(intr_info & VMCS_INTR_VALID); 2679 2680 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) { 2681 ASSERT3U(intr_info & 0xff, ==, IDT_NMI); 2682 vmm_call_trap(T_NMIFLT); 2683 } 2684 } 2685 } 2686 2687 static __inline void 2688 vmx_dr_enter_guest(struct vmxctx *vmxctx) 2689 { 2690 uint64_t rflags; 2691 2692 /* Save host control debug registers. */ 2693 vmxctx->host_dr7 = rdr7(); 2694 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR); 2695 2696 /* 2697 * Disable debugging in DR7 and DEBUGCTL to avoid triggering 2698 * exceptions in the host based on the guest DRx values. The 2699 * guest DR7 and DEBUGCTL are saved/restored in the VMCS. 2700 */ 2701 load_dr7(0); 2702 wrmsr(MSR_DEBUGCTLMSR, 0); 2703 2704 /* 2705 * Disable single stepping the kernel to avoid corrupting the 2706 * guest DR6. A debugger might still be able to corrupt the 2707 * guest DR6 by setting a breakpoint after this point and then 2708 * single stepping. 2709 */ 2710 rflags = read_rflags(); 2711 vmxctx->host_tf = rflags & PSL_T; 2712 write_rflags(rflags & ~PSL_T); 2713 2714 /* Save host debug registers. */ 2715 vmxctx->host_dr0 = rdr0(); 2716 vmxctx->host_dr1 = rdr1(); 2717 vmxctx->host_dr2 = rdr2(); 2718 vmxctx->host_dr3 = rdr3(); 2719 vmxctx->host_dr6 = rdr6(); 2720 2721 /* Restore guest debug registers. */ 2722 load_dr0(vmxctx->guest_dr0); 2723 load_dr1(vmxctx->guest_dr1); 2724 load_dr2(vmxctx->guest_dr2); 2725 load_dr3(vmxctx->guest_dr3); 2726 load_dr6(vmxctx->guest_dr6); 2727 } 2728 2729 static __inline void 2730 vmx_dr_leave_guest(struct vmxctx *vmxctx) 2731 { 2732 2733 /* Save guest debug registers. */ 2734 vmxctx->guest_dr0 = rdr0(); 2735 vmxctx->guest_dr1 = rdr1(); 2736 vmxctx->guest_dr2 = rdr2(); 2737 vmxctx->guest_dr3 = rdr3(); 2738 vmxctx->guest_dr6 = rdr6(); 2739 2740 /* 2741 * Restore host debug registers. Restore DR7, DEBUGCTL, and 2742 * PSL_T last. 
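 *
 * Keeping DR7 and DEBUGCTL disabled until the host DR0-DR6 values are
 * back in place ensures that no host breakpoint can fire against a
 * stale guest debug-register value; PSL_T is only re-enabled once that
 * restoration is complete.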
2743 */ 2744 load_dr0(vmxctx->host_dr0); 2745 load_dr1(vmxctx->host_dr1); 2746 load_dr2(vmxctx->host_dr2); 2747 load_dr3(vmxctx->host_dr3); 2748 load_dr6(vmxctx->host_dr6); 2749 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl); 2750 load_dr7(vmxctx->host_dr7); 2751 write_rflags(read_rflags() | vmxctx->host_tf); 2752 } 2753 2754 static int 2755 vmx_run(void *arg, int vcpu, uint64_t rip) 2756 { 2757 int rc, handled, launched; 2758 struct vmx *vmx; 2759 struct vm *vm; 2760 struct vmxctx *vmxctx; 2761 uintptr_t vmcs_pa; 2762 struct vm_exit *vmexit; 2763 struct vlapic *vlapic; 2764 uint32_t exit_reason; 2765 bool tpr_shadow_active; 2766 vm_client_t *vmc; 2767 2768 vmx = arg; 2769 vm = vmx->vm; 2770 vmcs_pa = vmx->vmcs_pa[vcpu]; 2771 vmxctx = &vmx->ctx[vcpu]; 2772 vlapic = vm_lapic(vm, vcpu); 2773 vmexit = vm_exitinfo(vm, vcpu); 2774 vmc = vm_get_vmclient(vm, vcpu); 2775 launched = 0; 2776 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) && 2777 !vmx_cap_en(vmx, VMX_CAP_APICV) && 2778 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0; 2779 2780 vmx_msr_guest_enter(vmx, vcpu); 2781 2782 vmcs_load(vmcs_pa); 2783 2784 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0); 2785 vmx->vmcs_state[vcpu] = VS_LOADED; 2786 2787 /* 2788 * XXX 2789 * We do this every time because we may setup the virtual machine 2790 * from a different process than the one that actually runs it. 2791 * 2792 * If the life of a virtual machine was spent entirely in the context 2793 * of a single process we could do this once in vmx_vminit(). 2794 */ 2795 vmcs_write(VMCS_HOST_CR3, rcr3()); 2796 2797 vmcs_write(VMCS_GUEST_RIP, rip); 2798 vmx_set_pcpu_defaults(vmx, vcpu); 2799 do { 2800 enum event_inject_state inject_state; 2801 uint64_t eptgen; 2802 2803 ASSERT3U(vmcs_read(VMCS_GUEST_RIP), ==, rip); 2804 2805 handled = UNHANDLED; 2806 2807 /* 2808 * Perform initial event/exception/interrupt injection before 2809 * host CPU interrupts are disabled. 2810 */ 2811 inject_state = vmx_inject_events(vmx, vcpu, rip); 2812 2813 /* 2814 * Interrupts are disabled from this point on until the 2815 * guest starts executing. This is done for the following 2816 * reasons: 2817 * 2818 * If an AST is asserted on this thread after the check below, 2819 * then the IPI_AST notification will not be lost, because it 2820 * will cause a VM exit due to external interrupt as soon as 2821 * the guest state is loaded. 2822 * 2823 * A posted interrupt after vmx_inject_vlapic() will not be 2824 * "lost" because it will be held pending in the host APIC 2825 * because interrupts are disabled. The pending interrupt will 2826 * be recognized as soon as the guest state is loaded. 2827 * 2828 * The same reasoning applies to the IPI generated by vmspace 2829 * invalidation. 2830 */ 2831 disable_intr(); 2832 2833 /* 2834 * If not precluded by existing events, inject any interrupt 2835 * pending on the vLAPIC. As a lock-less operation, it is safe 2836 * (and prudent) to perform with host CPU interrupts disabled. 2837 */ 2838 if (inject_state == EIS_CAN_INJECT) { 2839 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic); 2840 } 2841 2842 /* 2843 * Check for vCPU bail-out conditions. This must be done after 2844 * vmx_inject_events() to detect a triple-fault condition. 
2845 */ 2846 if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) { 2847 enable_intr(); 2848 break; 2849 } 2850 2851 if (vcpu_run_state_pending(vm, vcpu)) { 2852 enable_intr(); 2853 vm_exit_run_state(vmx->vm, vcpu, rip); 2854 break; 2855 } 2856 2857 /* 2858 * If subsequent activity queued events which require injection 2859 * handling, take another lap to handle them. 2860 */ 2861 if (vmx_inject_recheck(vmx, vcpu, inject_state)) { 2862 enable_intr(); 2863 handled = HANDLED; 2864 continue; 2865 } 2866 2867 if ((rc = smt_acquire()) != 1) { 2868 enable_intr(); 2869 vmexit->rip = rip; 2870 vmexit->inst_length = 0; 2871 if (rc == -1) { 2872 vmexit->exitcode = VM_EXITCODE_HT; 2873 } else { 2874 vmexit->exitcode = VM_EXITCODE_BOGUS; 2875 handled = HANDLED; 2876 } 2877 break; 2878 } 2879 2880 /* 2881 * If this thread has gone off-cpu due to mutex operations 2882 * during vmx_run, the VMCS will have been unloaded, forcing a 2883 * re-VMLAUNCH as opposed to VMRESUME. 2884 */ 2885 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0; 2886 /* 2887 * Restoration of the GDT limit is taken care of by 2888 * vmx_savectx(). Since the maximum practical index for the 2889 * IDT is 255, restoring its limits from the post-VMX-exit 2890 * default of 0xffff is not a concern. 2891 * 2892 * Only 64-bit hypervisor callers are allowed, which forgoes 2893 * the need to restore any LDT descriptor. Toss an error to 2894 * anyone attempting to break that rule. 2895 */ 2896 if (curproc->p_model != DATAMODEL_LP64) { 2897 smt_release(); 2898 enable_intr(); 2899 bzero(vmexit, sizeof (*vmexit)); 2900 vmexit->rip = rip; 2901 vmexit->exitcode = VM_EXITCODE_VMX; 2902 vmexit->u.vmx.status = VM_FAIL_INVALID; 2903 handled = UNHANDLED; 2904 break; 2905 } 2906 2907 if (tpr_shadow_active) { 2908 vmx_tpr_shadow_enter(vlapic); 2909 } 2910 2911 /* 2912 * Indicate activation of vmspace (EPT) table just prior to VMX 2913 * entry, checking for the necessity of an invept invalidation. 2914 */ 2915 eptgen = vmc_table_enter(vmc); 2916 if (vmx->eptgen[curcpu] != eptgen) { 2917 /* 2918 * VMspace generation does not match what was previously 2919 * used on this host CPU, so all mappings associated 2920 * with this EP4TA must be invalidated. 
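 *
 * The invept below uses type 1 (single-context invalidation), which
 * flushes only the guest-physical and combined mappings tagged with
 * this EPTP.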
2921 */ 2922 invept(1, vmx->eptp); 2923 vmx->eptgen[curcpu] = eptgen; 2924 } 2925 2926 vcpu_ustate_change(vm, vcpu, VU_RUN); 2927 vmx_dr_enter_guest(vmxctx); 2928 2929 /* Perform VMX entry */ 2930 rc = vmx_enter_guest(vmxctx, vmx, launched); 2931 2932 vmx_dr_leave_guest(vmxctx); 2933 vcpu_ustate_change(vm, vcpu, VU_EMU_KERN); 2934 2935 vmx->vmcs_state[vcpu] |= VS_LAUNCHED; 2936 smt_release(); 2937 2938 if (tpr_shadow_active) { 2939 vmx_tpr_shadow_exit(vlapic); 2940 } 2941 2942 /* Collect some information for VM exit processing */ 2943 vmexit->rip = rip = vmcs_read(VMCS_GUEST_RIP); 2944 vmexit->inst_length = vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH); 2945 vmexit->u.vmx.exit_reason = exit_reason = 2946 (vmcs_read(VMCS_EXIT_REASON) & BASIC_EXIT_REASON_MASK); 2947 vmexit->u.vmx.exit_qualification = 2948 vmcs_read(VMCS_EXIT_QUALIFICATION); 2949 /* Update 'nextrip' */ 2950 vmx->state[vcpu].nextrip = rip; 2951 2952 if (rc == VMX_GUEST_VMEXIT) { 2953 vmx_exit_handle_possible_nmi(vmexit); 2954 } 2955 enable_intr(); 2956 vmc_table_exit(vmc); 2957 2958 if (rc == VMX_GUEST_VMEXIT) { 2959 handled = vmx_exit_process(vmx, vcpu, vmexit); 2960 } else { 2961 vmx_exit_inst_error(vmxctx, rc, vmexit); 2962 } 2963 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip, 2964 uint32_t, exit_reason); 2965 rip = vmexit->rip; 2966 } while (handled); 2967 2968 /* If a VM exit has been handled then the exitcode must be BOGUS */ 2969 if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) { 2970 panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit", 2971 vmexit->exitcode); 2972 } 2973 2974 vmcs_clear(vmcs_pa); 2975 vmx_msr_guest_exit(vmx, vcpu); 2976 2977 VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0); 2978 vmx->vmcs_state[vcpu] = VS_NONE; 2979 2980 return (0); 2981 } 2982 2983 static void 2984 vmx_vmcleanup(void *arg) 2985 { 2986 int i; 2987 struct vmx *vmx = arg; 2988 uint16_t maxcpus; 2989 2990 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 2991 (void) vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE); 2992 kmem_free(vmx->apic_access_page, PAGESIZE); 2993 } else { 2994 VERIFY3P(vmx->apic_access_page, ==, NULL); 2995 } 2996 2997 vmx_msr_bitmap_destroy(vmx); 2998 2999 maxcpus = vm_get_maxcpus(vmx->vm); 3000 for (i = 0; i < maxcpus; i++) 3001 vpid_free(vmx->state[i].vpid); 3002 3003 kmem_free(vmx, sizeof (*vmx)); 3004 } 3005 3006 /* 3007 * Ensure that the VMCS for this vcpu is loaded. 3008 * Returns true if a VMCS load was required. 
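 *
 * A caller which receives 'true' is responsible for pairing this with a
 * vmx_vmcs_access_done() call once its VMCS accesses are complete.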
3009 */ 3010 static bool 3011 vmx_vmcs_access_ensure(struct vmx *vmx, int vcpu) 3012 { 3013 int hostcpu; 3014 3015 if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) { 3016 if (hostcpu != curcpu) { 3017 panic("unexpected vcpu migration %d != %d", 3018 hostcpu, curcpu); 3019 } 3020 /* Earlier logic already took care of the load */ 3021 return (false); 3022 } else { 3023 vmcs_load(vmx->vmcs_pa[vcpu]); 3024 return (true); 3025 } 3026 } 3027 3028 static void 3029 vmx_vmcs_access_done(struct vmx *vmx, int vcpu) 3030 { 3031 int hostcpu; 3032 3033 if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) { 3034 if (hostcpu != curcpu) { 3035 panic("unexpected vcpu migration %d != %d", 3036 hostcpu, curcpu); 3037 } 3038 /* Later logic will take care of the unload */ 3039 } else { 3040 vmcs_clear(vmx->vmcs_pa[vcpu]); 3041 } 3042 } 3043 3044 static uint64_t * 3045 vmxctx_regptr(struct vmxctx *vmxctx, int reg) 3046 { 3047 switch (reg) { 3048 case VM_REG_GUEST_RAX: 3049 return (&vmxctx->guest_rax); 3050 case VM_REG_GUEST_RBX: 3051 return (&vmxctx->guest_rbx); 3052 case VM_REG_GUEST_RCX: 3053 return (&vmxctx->guest_rcx); 3054 case VM_REG_GUEST_RDX: 3055 return (&vmxctx->guest_rdx); 3056 case VM_REG_GUEST_RSI: 3057 return (&vmxctx->guest_rsi); 3058 case VM_REG_GUEST_RDI: 3059 return (&vmxctx->guest_rdi); 3060 case VM_REG_GUEST_RBP: 3061 return (&vmxctx->guest_rbp); 3062 case VM_REG_GUEST_R8: 3063 return (&vmxctx->guest_r8); 3064 case VM_REG_GUEST_R9: 3065 return (&vmxctx->guest_r9); 3066 case VM_REG_GUEST_R10: 3067 return (&vmxctx->guest_r10); 3068 case VM_REG_GUEST_R11: 3069 return (&vmxctx->guest_r11); 3070 case VM_REG_GUEST_R12: 3071 return (&vmxctx->guest_r12); 3072 case VM_REG_GUEST_R13: 3073 return (&vmxctx->guest_r13); 3074 case VM_REG_GUEST_R14: 3075 return (&vmxctx->guest_r14); 3076 case VM_REG_GUEST_R15: 3077 return (&vmxctx->guest_r15); 3078 case VM_REG_GUEST_CR2: 3079 return (&vmxctx->guest_cr2); 3080 case VM_REG_GUEST_DR0: 3081 return (&vmxctx->guest_dr0); 3082 case VM_REG_GUEST_DR1: 3083 return (&vmxctx->guest_dr1); 3084 case VM_REG_GUEST_DR2: 3085 return (&vmxctx->guest_dr2); 3086 case VM_REG_GUEST_DR3: 3087 return (&vmxctx->guest_dr3); 3088 case VM_REG_GUEST_DR6: 3089 return (&vmxctx->guest_dr6); 3090 default: 3091 break; 3092 } 3093 return (NULL); 3094 } 3095 3096 static int 3097 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval) 3098 { 3099 struct vmx *vmx = arg; 3100 uint64_t *regp; 3101 3102 /* VMCS access not required for ctx reads */ 3103 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3104 *retval = *regp; 3105 return (0); 3106 } 3107 3108 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3109 int err = 0; 3110 3111 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3112 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3113 *retval = (gi & HWINTR_BLOCKING) ? 
1 : 0; 3114 } else { 3115 uint32_t encoding; 3116 3117 encoding = vmcs_field_encoding(reg); 3118 switch (encoding) { 3119 case VMCS_GUEST_CR0: 3120 /* Take the shadow bits into account */ 3121 *retval = vmx_unshadow_cr0(vmcs_read(encoding), 3122 vmcs_read(VMCS_CR0_SHADOW)); 3123 break; 3124 case VMCS_GUEST_CR4: 3125 /* Take the shadow bits into account */ 3126 *retval = vmx_unshadow_cr4(vmcs_read(encoding), 3127 vmcs_read(VMCS_CR4_SHADOW)); 3128 break; 3129 case VMCS_INVALID_ENCODING: 3130 err = EINVAL; 3131 break; 3132 default: 3133 *retval = vmcs_read(encoding); 3134 break; 3135 } 3136 } 3137 3138 if (vmcs_loaded) { 3139 vmx_vmcs_access_done(vmx, vcpu); 3140 } 3141 return (err); 3142 } 3143 3144 static int 3145 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val) 3146 { 3147 struct vmx *vmx = arg; 3148 uint64_t *regp; 3149 3150 /* VMCS access not required for ctx writes */ 3151 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) { 3152 *regp = val; 3153 return (0); 3154 } 3155 3156 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3157 int err = 0; 3158 3159 if (reg == VM_REG_GUEST_INTR_SHADOW) { 3160 if (val != 0) { 3161 /* 3162 * Forcing the vcpu into an interrupt shadow is not 3163 * presently supported. 3164 */ 3165 err = EINVAL; 3166 } else { 3167 uint64_t gi; 3168 3169 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY); 3170 gi &= ~HWINTR_BLOCKING; 3171 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi); 3172 err = 0; 3173 } 3174 } else { 3175 uint32_t encoding; 3176 3177 err = 0; 3178 encoding = vmcs_field_encoding(reg); 3179 switch (encoding) { 3180 case VMCS_GUEST_IA32_EFER: 3181 vmcs_write(encoding, val); 3182 vmx_sync_efer_state(vmx, vcpu, val); 3183 break; 3184 case VMCS_GUEST_CR0: 3185 /* 3186 * The guest is not allowed to modify certain bits in 3187 * %cr0 and %cr4. To maintain the illusion of full 3188 * control, they have shadow versions which contain the 3189 * guest-perceived (via reads from the register) values 3190 * as opposed to the guest-effective values. 3191 * 3192 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6. 3193 */ 3194 vmcs_write(VMCS_CR0_SHADOW, val); 3195 vmcs_write(encoding, vmx_fix_cr0(val)); 3196 break; 3197 case VMCS_GUEST_CR4: 3198 /* See above for detail on %cr4 shadowing */ 3199 vmcs_write(VMCS_CR4_SHADOW, val); 3200 vmcs_write(encoding, vmx_fix_cr4(val)); 3201 break; 3202 case VMCS_GUEST_CR3: 3203 vmcs_write(encoding, val); 3204 /* 3205 * Invalidate the guest vcpu's TLB mappings to emulate 3206 * the behavior of updating %cr3. 3207 * 3208 * XXX the processor retains global mappings when %cr3 3209 * is updated but vmx_invvpid() does not. 
3210 */ 3211 vmx_invvpid(vmx, vcpu, 3212 vcpu_is_running(vmx->vm, vcpu, NULL)); 3213 break; 3214 case VMCS_INVALID_ENCODING: 3215 err = EINVAL; 3216 break; 3217 default: 3218 vmcs_write(encoding, val); 3219 break; 3220 } 3221 } 3222 3223 if (vmcs_loaded) { 3224 vmx_vmcs_access_done(vmx, vcpu); 3225 } 3226 return (err); 3227 } 3228 3229 static int 3230 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc) 3231 { 3232 struct vmx *vmx = arg; 3233 uint32_t base, limit, access; 3234 3235 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3236 3237 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3238 desc->base = vmcs_read(base); 3239 desc->limit = vmcs_read(limit); 3240 if (access != VMCS_INVALID_ENCODING) { 3241 desc->access = vmcs_read(access); 3242 } else { 3243 desc->access = 0; 3244 } 3245 3246 if (vmcs_loaded) { 3247 vmx_vmcs_access_done(vmx, vcpu); 3248 } 3249 return (0); 3250 } 3251 3252 static int 3253 vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc) 3254 { 3255 struct vmx *vmx = arg; 3256 uint32_t base, limit, access; 3257 3258 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3259 3260 vmcs_seg_desc_encoding(seg, &base, &limit, &access); 3261 vmcs_write(base, desc->base); 3262 vmcs_write(limit, desc->limit); 3263 if (access != VMCS_INVALID_ENCODING) { 3264 vmcs_write(access, desc->access); 3265 } 3266 3267 if (vmcs_loaded) { 3268 vmx_vmcs_access_done(vmx, vcpu); 3269 } 3270 return (0); 3271 } 3272 3273 static uint64_t * 3274 vmx_msr_ptr(struct vmx *vmx, int vcpu, uint32_t msr) 3275 { 3276 uint64_t *guest_msrs = vmx->guest_msrs[vcpu]; 3277 3278 switch (msr) { 3279 case MSR_LSTAR: 3280 return (&guest_msrs[IDX_MSR_LSTAR]); 3281 case MSR_CSTAR: 3282 return (&guest_msrs[IDX_MSR_CSTAR]); 3283 case MSR_STAR: 3284 return (&guest_msrs[IDX_MSR_STAR]); 3285 case MSR_SF_MASK: 3286 return (&guest_msrs[IDX_MSR_SF_MASK]); 3287 case MSR_KGSBASE: 3288 return (&guest_msrs[IDX_MSR_KGSBASE]); 3289 case MSR_PAT: 3290 return (&guest_msrs[IDX_MSR_PAT]); 3291 default: 3292 return (NULL); 3293 } 3294 } 3295 3296 static int 3297 vmx_msr_get(void *arg, int vcpu, uint32_t msr, uint64_t *valp) 3298 { 3299 struct vmx *vmx = arg; 3300 3301 ASSERT(valp != NULL); 3302 3303 const uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr); 3304 if (msrp != NULL) { 3305 *valp = *msrp; 3306 return (0); 3307 } 3308 3309 const uint32_t vmcs_enc = vmcs_msr_encoding(msr); 3310 if (vmcs_enc != VMCS_INVALID_ENCODING) { 3311 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3312 3313 *valp = vmcs_read(vmcs_enc); 3314 3315 if (vmcs_loaded) { 3316 vmx_vmcs_access_done(vmx, vcpu); 3317 } 3318 return (0); 3319 } 3320 3321 return (EINVAL); 3322 } 3323 3324 static int 3325 vmx_msr_set(void *arg, int vcpu, uint32_t msr, uint64_t val) 3326 { 3327 struct vmx *vmx = arg; 3328 3329 /* TODO: mask value */ 3330 3331 uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr); 3332 if (msrp != NULL) { 3333 *msrp = val; 3334 return (0); 3335 } 3336 3337 const uint32_t vmcs_enc = vmcs_msr_encoding(msr); 3338 if (vmcs_enc != VMCS_INVALID_ENCODING) { 3339 bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu); 3340 3341 vmcs_write(vmcs_enc, val); 3342 3343 if (msr == MSR_EFER) { 3344 vmx_sync_efer_state(vmx, vcpu, val); 3345 } 3346 3347 if (vmcs_loaded) { 3348 vmx_vmcs_access_done(vmx, vcpu); 3349 } 3350 return (0); 3351 } 3352 return (EINVAL); 3353 } 3354 3355 static int 3356 vmx_getcap(void *arg, int vcpu, int type, int *retval) 3357 { 3358 struct vmx *vmx = arg; 3359 int vcap; 3360 int ret; 3361 3362 ret = ENOENT; 3363 3364 
vcap = vmx->cap[vcpu].set; 3365 3366 switch (type) { 3367 case VM_CAP_HALT_EXIT: 3368 ret = 0; 3369 break; 3370 case VM_CAP_PAUSE_EXIT: 3371 if (cap_pause_exit) 3372 ret = 0; 3373 break; 3374 case VM_CAP_MTRAP_EXIT: 3375 if (cap_monitor_trap) 3376 ret = 0; 3377 break; 3378 case VM_CAP_ENABLE_INVPCID: 3379 if (cap_invpcid) 3380 ret = 0; 3381 break; 3382 case VM_CAP_BPT_EXIT: 3383 ret = 0; 3384 break; 3385 default: 3386 break; 3387 } 3388 3389 if (ret == 0) 3390 *retval = (vcap & (1 << type)) ? 1 : 0; 3391 3392 return (ret); 3393 } 3394 3395 static int 3396 vmx_setcap(void *arg, int vcpu, int type, int val) 3397 { 3398 struct vmx *vmx = arg; 3399 uint32_t baseval, reg, flag; 3400 uint32_t *pptr; 3401 int error; 3402 3403 error = ENOENT; 3404 pptr = NULL; 3405 3406 switch (type) { 3407 case VM_CAP_HALT_EXIT: 3408 error = 0; 3409 pptr = &vmx->cap[vcpu].proc_ctls; 3410 baseval = *pptr; 3411 flag = PROCBASED_HLT_EXITING; 3412 reg = VMCS_PRI_PROC_BASED_CTLS; 3413 break; 3414 case VM_CAP_MTRAP_EXIT: 3415 if (cap_monitor_trap) { 3416 error = 0; 3417 pptr = &vmx->cap[vcpu].proc_ctls; 3418 baseval = *pptr; 3419 flag = PROCBASED_MTF; 3420 reg = VMCS_PRI_PROC_BASED_CTLS; 3421 } 3422 break; 3423 case VM_CAP_PAUSE_EXIT: 3424 if (cap_pause_exit) { 3425 error = 0; 3426 pptr = &vmx->cap[vcpu].proc_ctls; 3427 baseval = *pptr; 3428 flag = PROCBASED_PAUSE_EXITING; 3429 reg = VMCS_PRI_PROC_BASED_CTLS; 3430 } 3431 break; 3432 case VM_CAP_ENABLE_INVPCID: 3433 if (cap_invpcid) { 3434 error = 0; 3435 pptr = &vmx->cap[vcpu].proc_ctls2; 3436 baseval = *pptr; 3437 flag = PROCBASED2_ENABLE_INVPCID; 3438 reg = VMCS_SEC_PROC_BASED_CTLS; 3439 } 3440 break; 3441 case VM_CAP_BPT_EXIT: 3442 error = 0; 3443 3444 /* Don't change the bitmap if we are tracing all exceptions. */ 3445 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) { 3446 pptr = &vmx->cap[vcpu].exc_bitmap; 3447 baseval = *pptr; 3448 flag = (1 << IDT_BP); 3449 reg = VMCS_EXCEPTION_BITMAP; 3450 } 3451 break; 3452 default: 3453 break; 3454 } 3455 3456 if (error != 0) { 3457 return (error); 3458 } 3459 3460 if (pptr != NULL) { 3461 if (val) { 3462 baseval |= flag; 3463 } else { 3464 baseval &= ~flag; 3465 } 3466 vmcs_load(vmx->vmcs_pa[vcpu]); 3467 vmcs_write(reg, baseval); 3468 vmcs_clear(vmx->vmcs_pa[vcpu]); 3469 3470 /* 3471 * Update optional stored flags, and record 3472 * setting 3473 */ 3474 *pptr = baseval; 3475 } 3476 3477 if (val) { 3478 vmx->cap[vcpu].set |= (1 << type); 3479 } else { 3480 vmx->cap[vcpu].set &= ~(1 << type); 3481 } 3482 3483 return (0); 3484 } 3485 3486 struct vlapic_vtx { 3487 struct vlapic vlapic; 3488 3489 /* Align to the nearest cacheline */ 3490 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)]; 3491 3492 /* TMR handling state for posted interrupts */ 3493 uint32_t tmr_active[8]; 3494 uint32_t pending_level[8]; 3495 uint32_t pending_edge[8]; 3496 3497 struct pir_desc *pir_desc; 3498 struct vmx *vmx; 3499 uint_t pending_prio; 3500 boolean_t tmr_sync; 3501 }; 3502 3503 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0); 3504 3505 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4)) 3506 3507 static vcpu_notify_t 3508 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level) 3509 { 3510 struct vlapic_vtx *vlapic_vtx; 3511 struct pir_desc *pir_desc; 3512 uint32_t mask, tmrval; 3513 int idx; 3514 vcpu_notify_t notify = VCPU_NOTIFY_NONE; 3515 3516 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3517 pir_desc = vlapic_vtx->pir_desc; 3518 idx = vector / 32; 3519 mask = 1UL << (vector % 32); 3520 3521 /* 3522 * If the currently asserted TMRs 
do not match the state requested by
3523 * the incoming interrupt, an exit will be required to reconcile those
3524 * bits in the APIC page. This will keep the vLAPIC behavior in line
3525 * with the architecturally defined expectations.
3526 *
3527 * If actors of mixed types (edge and level) are racing against the same
3528 * vector (toggling its TMR bit back and forth), the results could be
3529 * inconsistent. Such circumstances are considered a rare edge case and
3530 * are never expected to be found in the wild.
3531 */
3532 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]);
3533 if (!level) {
3534 if ((tmrval & mask) != 0) {
3535 /* Edge-triggered interrupt needs TMR de-asserted */
3536 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask);
3537 atomic_store_rel_long(&pir_desc->pending, 1);
3538 return (VCPU_NOTIFY_EXIT);
3539 }
3540 } else {
3541 if ((tmrval & mask) == 0) {
3542 /* Level-triggered interrupt needs TMR asserted */
3543 atomic_set_int(&vlapic_vtx->pending_level[idx], mask);
3544 atomic_store_rel_long(&pir_desc->pending, 1);
3545 return (VCPU_NOTIFY_EXIT);
3546 }
3547 }
3548
3549 /*
3550 * If the interrupt request does not require manipulation of the TMRs
3551 * for delivery, set it in the PIR descriptor. It cannot be inserted into
3552 * the APIC page while the vCPU might be running.
3553 */
3554 atomic_set_int(&pir_desc->pir[idx], mask);
3555
3556 /*
3557 * A notification is required whenever the 'pending' bit makes a
3558 * transition from 0->1.
3559 *
3560 * Even if the 'pending' bit is already asserted, notification about
3561 * the incoming interrupt may still be necessary. For example, if a
3562 * vCPU is HLTed with a high PPR, a low-priority interrupt would cause
3563 * the 0->1 'pending' transition with a notification, but the vCPU
3564 * would ignore the interrupt for the time being. The same vCPU would
3565 * need to then be notified if a high-priority interrupt arrived which
3566 * satisfied the PPR.
3567 *
3568 * The priorities of interrupts injected while 'pending' is asserted
3569 * are tracked in a custom bitfield 'pending_prio'. Should the
3570 * to-be-injected interrupt exceed the priorities already present, the
3571 * notification is sent. The priorities recorded in 'pending_prio' are
3572 * cleared whenever the 'pending' bit makes another 0->1 transition.
3573 */
3574 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) {
3575 notify = VCPU_NOTIFY_APIC;
3576 vlapic_vtx->pending_prio = 0;
3577 } else {
3578 const uint_t old_prio = vlapic_vtx->pending_prio;
3579 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
3580
3581 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) {
3582 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
3583 notify = VCPU_NOTIFY_APIC;
3584 }
3585 }
3586
3587 return (notify);
3588 }
3589
3590 static void
3591 vmx_apicv_accepted(struct vlapic *vlapic, int vector)
3592 {
3593 /*
3594 * When APICv is enabled for an instance, the traditional interrupt
3595 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not
3596 * used and the CPU does the heavy lifting of virtual interrupt
3597 * delivery. For that reason vmx_intr_accepted() should never be called
3598 * when APICv is enabled.
3599 */ 3600 panic("vmx_intr_accepted: not expected to be called"); 3601 } 3602 3603 static void 3604 vmx_apicv_sync_tmr(struct vlapic *vlapic) 3605 { 3606 struct vlapic_vtx *vlapic_vtx; 3607 const uint32_t *tmrs; 3608 3609 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3610 tmrs = &vlapic_vtx->tmr_active[0]; 3611 3612 if (!vlapic_vtx->tmr_sync) { 3613 return; 3614 } 3615 3616 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]); 3617 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]); 3618 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]); 3619 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]); 3620 vlapic_vtx->tmr_sync = B_FALSE; 3621 } 3622 3623 static void 3624 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic) 3625 { 3626 struct vmx *vmx; 3627 uint32_t proc_ctls; 3628 int vcpuid; 3629 3630 vcpuid = vlapic->vcpuid; 3631 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3632 3633 proc_ctls = vmx->cap[vcpuid].proc_ctls; 3634 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW; 3635 proc_ctls |= PROCBASED_CR8_LOAD_EXITING; 3636 proc_ctls |= PROCBASED_CR8_STORE_EXITING; 3637 vmx->cap[vcpuid].proc_ctls = proc_ctls; 3638 3639 vmcs_load(vmx->vmcs_pa[vcpuid]); 3640 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls); 3641 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3642 } 3643 3644 static void 3645 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic) 3646 { 3647 struct vmx *vmx; 3648 uint32_t proc_ctls2; 3649 int vcpuid; 3650 3651 vcpuid = vlapic->vcpuid; 3652 vmx = ((struct vlapic_vtx *)vlapic)->vmx; 3653 3654 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2; 3655 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0, 3656 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2)); 3657 3658 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES; 3659 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE; 3660 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2; 3661 3662 vmcs_load(vmx->vmcs_pa[vcpuid]); 3663 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2); 3664 vmcs_clear(vmx->vmcs_pa[vcpuid]); 3665 3666 vmx_allow_x2apic_msrs(vmx, vcpuid); 3667 } 3668 3669 static void 3670 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu) 3671 { 3672 psm_send_pir_ipi(hostcpu); 3673 } 3674 3675 static void 3676 vmx_apicv_sync(struct vlapic *vlapic) 3677 { 3678 struct vlapic_vtx *vlapic_vtx; 3679 struct pir_desc *pir_desc; 3680 struct LAPIC *lapic; 3681 uint_t i; 3682 3683 vlapic_vtx = (struct vlapic_vtx *)vlapic; 3684 pir_desc = vlapic_vtx->pir_desc; 3685 lapic = vlapic->apic_page; 3686 3687 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) { 3688 return; 3689 } 3690 3691 vlapic_vtx->pending_prio = 0; 3692 3693 /* Make sure the invalid (0-15) vectors are not set */ 3694 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff); 3695 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff); 3696 ASSERT0(pir_desc->pir[0] & 0xffff); 3697 3698 for (i = 0; i <= 7; i++) { 3699 uint32_t *tmrp = &lapic->tmr0 + (i * 4); 3700 uint32_t *irrp = &lapic->irr0 + (i * 4); 3701 3702 const uint32_t pending_level = 3703 atomic_readandclear_int(&vlapic_vtx->pending_level[i]); 3704 const uint32_t pending_edge = 3705 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]); 3706 const uint32_t pending_inject = 3707 atomic_readandclear_int(&pir_desc->pir[i]); 3708 3709 if (pending_level != 0) { 3710 /* 3711 * Level-triggered interrupts assert their corresponding 3712 * bit in the TMR when queued in IRR. 
3713 */ 3714 *tmrp |= pending_level; 3715 *irrp |= pending_level; 3716 } 3717 if (pending_edge != 0) { 3718 /* 3719 * When queuing an edge-triggered interrupt in IRR, the 3720 * corresponding bit in the TMR is cleared. 3721 */ 3722 *tmrp &= ~pending_edge; 3723 *irrp |= pending_edge; 3724 } 3725 if (pending_inject != 0) { 3726 /* 3727 * Interrupts which do not require a change to the TMR 3728 * (because it already matches the necessary state) can 3729 * simply be queued in IRR. 3730 */ 3731 *irrp |= pending_inject; 3732 } 3733 3734 if (*tmrp != vlapic_vtx->tmr_active[i]) { 3735 /* Check if VMX EOI triggers require updating. */ 3736 vlapic_vtx->tmr_active[i] = *tmrp; 3737 vlapic_vtx->tmr_sync = B_TRUE; 3738 } 3739 } 3740 } 3741 3742 static void 3743 vmx_tpr_shadow_enter(struct vlapic *vlapic) 3744 { 3745 /* 3746 * When TPR shadowing is enabled, VMX will initiate a guest exit if its 3747 * TPR falls below a threshold priority. That threshold is set to the 3748 * current TPR priority, since guest interrupt status should be 3749 * re-evaluated if its TPR is set lower. 3750 */ 3751 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic)); 3752 } 3753 3754 static void 3755 vmx_tpr_shadow_exit(struct vlapic *vlapic) 3756 { 3757 /* 3758 * Unlike full APICv, where changes to the TPR are reflected in the PPR, 3759 * with TPR shadowing, that duty is relegated to the VMM. Upon exit, 3760 * the PPR is updated to reflect any change in the TPR here. 3761 */ 3762 vlapic_sync_tpr(vlapic); 3763 } 3764 3765 static struct vlapic * 3766 vmx_vlapic_init(void *arg, int vcpuid) 3767 { 3768 struct vmx *vmx = arg; 3769 struct vlapic_vtx *vlapic_vtx; 3770 struct vlapic *vlapic; 3771 3772 vlapic_vtx = kmem_zalloc(sizeof (struct vlapic_vtx), KM_SLEEP); 3773 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid]; 3774 vlapic_vtx->vmx = vmx; 3775 3776 vlapic = &vlapic_vtx->vlapic; 3777 vlapic->vm = vmx->vm; 3778 vlapic->vcpuid = vcpuid; 3779 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid]; 3780 3781 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) { 3782 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts; 3783 } 3784 if (vmx_cap_en(vmx, VMX_CAP_APICV)) { 3785 vlapic->ops.set_intr_ready = vmx_apicv_set_ready; 3786 vlapic->ops.sync_state = vmx_apicv_sync; 3787 vlapic->ops.intr_accepted = vmx_apicv_accepted; 3788 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid; 3789 3790 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) { 3791 vlapic->ops.post_intr = vmx_apicv_notify; 3792 } 3793 } 3794 3795 vlapic_init(vlapic); 3796 3797 return (vlapic); 3798 } 3799 3800 static void 3801 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic) 3802 { 3803 vlapic_cleanup(vlapic); 3804 kmem_free(vlapic, sizeof (struct vlapic_vtx)); 3805 } 3806 3807 static void 3808 vmx_pause(void *arg, int vcpuid) 3809 { 3810 struct vmx *vmx = arg; 3811 3812 VERIFY(vmx_vmcs_access_ensure(vmx, vcpuid)); 3813 3814 /* Stash any interrupt/exception pending injection. */ 3815 vmx_stash_intinfo(vmx, vcpuid); 3816 3817 /* 3818 * Now that no event is pending injection, interrupt-window exiting and 3819 * NMI-window exiting can be disabled. If/when this vCPU is made to run 3820 * again, those conditions will be reinstated when the now-queued events 3821 * are re-injected. 
3822 */ 3823 vmx_clear_nmi_window_exiting(vmx, vcpuid); 3824 vmx_clear_int_window_exiting(vmx, vcpuid); 3825 3826 vmx_vmcs_access_done(vmx, vcpuid); 3827 } 3828 3829 static void 3830 vmx_savectx(void *arg, int vcpu) 3831 { 3832 struct vmx *vmx = arg; 3833 3834 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3835 vmcs_clear(vmx->vmcs_pa[vcpu]); 3836 vmx_msr_guest_exit(vmx, vcpu); 3837 /* 3838 * Having VMCLEARed the VMCS, it can no longer be re-entered 3839 * with VMRESUME, but must be VMLAUNCHed again. 3840 */ 3841 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED; 3842 } 3843 3844 reset_gdtr_limit(); 3845 } 3846 3847 static void 3848 vmx_restorectx(void *arg, int vcpu) 3849 { 3850 struct vmx *vmx = arg; 3851 3852 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED); 3853 3854 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) { 3855 vmx_msr_guest_enter(vmx, vcpu); 3856 vmcs_load(vmx->vmcs_pa[vcpu]); 3857 } 3858 } 3859 3860 static freqratio_res_t 3861 vmx_freq_ratio(uint64_t guest_hz, uint64_t host_hz, uint64_t *mult) 3862 { 3863 if (guest_hz == host_hz) { 3864 *mult = VM_TSCM_NOSCALE; 3865 return (FR_SCALING_NOT_NEEDED); 3866 } 3867 3868 /* VMX support not implemented at this time */ 3869 return (FR_SCALING_NOT_SUPPORTED); 3870 } 3871 3872 struct vmm_ops vmm_ops_intel = { 3873 .init = vmx_init, 3874 .cleanup = vmx_cleanup, 3875 .resume = vmx_restore, 3876 3877 .vminit = vmx_vminit, 3878 .vmrun = vmx_run, 3879 .vmcleanup = vmx_vmcleanup, 3880 .vmgetreg = vmx_getreg, 3881 .vmsetreg = vmx_setreg, 3882 .vmgetdesc = vmx_getdesc, 3883 .vmsetdesc = vmx_setdesc, 3884 .vmgetcap = vmx_getcap, 3885 .vmsetcap = vmx_setcap, 3886 .vlapic_init = vmx_vlapic_init, 3887 .vlapic_cleanup = vmx_vlapic_cleanup, 3888 .vmpause = vmx_pause, 3889 3890 .vmsavectx = vmx_savectx, 3891 .vmrestorectx = vmx_restorectx, 3892 3893 .vmgetmsr = vmx_msr_get, 3894 .vmsetmsr = vmx_msr_set, 3895 3896 .vmfreqratio = vmx_freq_ratio, 3897 .fr_intsize = INTEL_TSCM_INT_SIZE, 3898 .fr_fracsize = INTEL_TSCM_FRAC_SIZE, 3899 }; 3900 3901 /* Side-effect free HW validation derived from checks in vmx_init. 
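 * On failure, a human-readable reason is stored in *msg and the error from
 * vmx_set_ctlreg() is returned; on success, 0 is returned and *msg is left
 * untouched.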
*/ 3902 int 3903 vmx_x86_supported(const char **msg) 3904 { 3905 int error; 3906 uint32_t tmp; 3907 3908 ASSERT(msg != NULL); 3909 3910 /* Check support for primary processor-based VM-execution controls */ 3911 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, 3912 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING, 3913 PROCBASED_CTLS_ZERO_SETTING, &tmp); 3914 if (error) { 3915 *msg = "processor does not support desired primary " 3916 "processor-based controls"; 3917 return (error); 3918 } 3919 3920 /* Check support for secondary processor-based VM-execution controls */ 3921 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, 3922 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING, 3923 PROCBASED_CTLS2_ZERO_SETTING, &tmp); 3924 if (error) { 3925 *msg = "processor does not support desired secondary " 3926 "processor-based controls"; 3927 return (error); 3928 } 3929 3930 /* Check support for pin-based VM-execution controls */ 3931 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS, 3932 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING, 3933 PINBASED_CTLS_ZERO_SETTING, &tmp); 3934 if (error) { 3935 *msg = "processor does not support desired pin-based controls"; 3936 return (error); 3937 } 3938 3939 /* Check support for VM-exit controls */ 3940 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS, 3941 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp); 3942 if (error) { 3943 *msg = "processor does not support desired exit controls"; 3944 return (error); 3945 } 3946 3947 /* Check support for VM-entry controls */ 3948 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS, 3949 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp); 3950 if (error) { 3951 *msg = "processor does not support desired entry controls"; 3952 return (error); 3953 } 3954 3955 /* Unrestricted guest is nominally optional, but not for us. */ 3956 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2, 3957 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp); 3958 if (error) { 3959 *msg = "processor does not support desired unrestricted guest " 3960 "controls"; 3961 return (error); 3962 } 3963 3964 return (0); 3965 } 3966
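
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	const char *msg;
 *
 *	if (vmx_x86_supported(&msg) != 0)
 *		cmn_err(CE_NOTE, "VMX not usable: %s", msg);
 */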