xref: /illumos-gate/usr/src/uts/intel/io/vmm/intel/vmx.c (revision ad4335f7)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  * Copyright (c) 2018 Joyent, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 /*
32  * This file and its contents are supplied under the terms of the
33  * Common Development and Distribution License ("CDDL"), version 1.0.
34  * You may only use this file in accordance with the terms of version
35  * 1.0 of the CDDL.
36  *
37  * A full copy of the text of the CDDL should have accompanied this
38  * source.  A copy of the CDDL is also available via the Internet at
39  * http://www.illumos.org/license/CDDL.
40  *
41  * Copyright 2015 Pluribus Networks Inc.
42  * Copyright 2018 Joyent, Inc.
43  * Copyright 2022 Oxide Computer Company
44  * Copyright 2022 MNX Cloud, Inc.
45  */
46 
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/kmem.h>
54 #include <sys/pcpu.h>
55 #include <sys/proc.h>
56 #include <sys/sysctl.h>
57 
58 #include <sys/x86_archext.h>
59 #include <sys/smp_impldefs.h>
60 #include <sys/smt.h>
61 #include <sys/hma.h>
62 #include <sys/trap.h>
63 #include <sys/archsystm.h>
64 
65 #include <machine/psl.h>
66 #include <machine/cpufunc.h>
67 #include <machine/md_var.h>
68 #include <machine/reg.h>
69 #include <machine/segments.h>
70 #include <machine/specialreg.h>
71 #include <machine/vmparam.h>
72 #include <sys/vmm_vm.h>
73 #include <sys/vmm_kernel.h>
74 
75 #include <machine/vmm.h>
76 #include <machine/vmm_dev.h>
77 #include <sys/vmm_instruction_emul.h>
78 #include "vmm_lapic.h"
79 #include "vmm_host.h"
80 #include "vmm_ioport.h"
81 #include "vmm_stat.h"
82 #include "vatpic.h"
83 #include "vlapic.h"
84 #include "vlapic_priv.h"
85 
86 #include "vmcs.h"
87 #include "vmx.h"
88 #include "vmx_msr.h"
89 #include "vmx_controls.h"
90 
91 #define	PINBASED_CTLS_ONE_SETTING					\
92 	(PINBASED_EXTINT_EXITING	|				\
93 	PINBASED_NMI_EXITING		|				\
94 	PINBASED_VIRTUAL_NMI)
95 #define	PINBASED_CTLS_ZERO_SETTING	0
96 
97 #define	PROCBASED_CTLS_WINDOW_SETTING					\
98 	(PROCBASED_INT_WINDOW_EXITING	|				\
99 	PROCBASED_NMI_WINDOW_EXITING)
100 
101 /*
102  * Unlike FreeBSD bhyve, we consider several additional proc-based
103  * controls necessary:
104  * - TSC offsetting
105  * - HLT exiting
106  */
107 #define	PROCBASED_CTLS_ONE_SETTING					\
108 	(PROCBASED_SECONDARY_CONTROLS	|				\
109 	PROCBASED_TSC_OFFSET		|				\
110 	PROCBASED_HLT_EXITING		|				\
111 	PROCBASED_MWAIT_EXITING		|				\
112 	PROCBASED_MONITOR_EXITING	|				\
113 	PROCBASED_IO_EXITING		|				\
114 	PROCBASED_MSR_BITMAPS		|				\
115 	PROCBASED_CTLS_WINDOW_SETTING	|				\
116 	PROCBASED_CR8_LOAD_EXITING	|				\
117 	PROCBASED_CR8_STORE_EXITING)
118 
119 #define	PROCBASED_CTLS_ZERO_SETTING	\
120 	(PROCBASED_CR3_LOAD_EXITING |	\
121 	PROCBASED_CR3_STORE_EXITING |	\
122 	PROCBASED_IO_BITMAPS)
123 
124 /*
125  * EPT and Unrestricted Guest are considered necessities.  The latter is not a
126  * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
127  * without a bootrom starting in real mode.
128  */
129 #define	PROCBASED_CTLS2_ONE_SETTING		\
130 	(PROCBASED2_ENABLE_EPT |		\
131 	PROCBASED2_UNRESTRICTED_GUEST)
132 #define	PROCBASED_CTLS2_ZERO_SETTING	0
133 
134 #define	VM_EXIT_CTLS_ONE_SETTING					\
135 	(VM_EXIT_SAVE_DEBUG_CONTROLS		|			\
136 	VM_EXIT_HOST_LMA			|			\
137 	VM_EXIT_LOAD_PAT			|			\
138 	VM_EXIT_SAVE_EFER			|			\
139 	VM_EXIT_LOAD_EFER			|			\
140 	VM_EXIT_ACKNOWLEDGE_INTERRUPT)
141 
142 #define	VM_EXIT_CTLS_ZERO_SETTING	0
143 
144 #define	VM_ENTRY_CTLS_ONE_SETTING					\
145 	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|			\
146 	VM_ENTRY_LOAD_EFER)
147 
148 #define	VM_ENTRY_CTLS_ZERO_SETTING					\
149 	(VM_ENTRY_INTO_SMM			|			\
150 	VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
151 
152 /*
153  * Cover the EPT capabilities used by bhyve at present:
154  * - 4-level page walks
155  * - write-back memory type
156  * - INVEPT operations (all types)
157  * - INVVPID operations (single-context only)
158  */
159 #define	EPT_CAPS_REQUIRED			\
160 	(IA32_VMX_EPT_VPID_PWL4 |		\
161 	IA32_VMX_EPT_VPID_TYPE_WB |		\
162 	IA32_VMX_EPT_VPID_INVEPT |		\
163 	IA32_VMX_EPT_VPID_INVEPT_SINGLE |	\
164 	IA32_VMX_EPT_VPID_INVEPT_ALL |		\
165 	IA32_VMX_EPT_VPID_INVVPID |		\
166 	IA32_VMX_EPT_VPID_INVVPID_SINGLE)
167 
168 #define	HANDLED		1
169 #define	UNHANDLED	0
170 
171 SYSCTL_DECL(_hw_vmm);
172 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
173     NULL);
174 
175 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
176 static uint32_t exit_ctls, entry_ctls;
177 
178 static uint64_t cr0_ones_mask, cr0_zeros_mask;
179 
180 static uint64_t cr4_ones_mask, cr4_zeros_mask;
181 
182 static int vmx_initialized;
183 
184 /*
185  * Optional capabilities
186  */
187 
188 /* PAUSE triggers a VM-exit */
189 static int cap_pause_exit;
190 
191 /* WBINVD triggers a VM-exit */
192 static int cap_wbinvd_exit;
193 
194 /* Monitor trap flag */
195 static int cap_monitor_trap;
196 
197 /* Guests are allowed to use INVPCID */
198 static int cap_invpcid;
199 
200 /* Extra capabilities (VMX_CAP_*) beyond the minimum */
201 static enum vmx_caps vmx_capabilities;
202 
203 /* APICv posted interrupt vector */
204 static int pirvec = -1;
205 
206 static uint_t vpid_alloc_failed;
207 
208 int guest_l1d_flush;
209 int guest_l1d_flush_sw;
210 
211 /* MSR save region is composed of an array of 'struct msr_entry' */
212 struct msr_entry {
213 	uint32_t	index;
214 	uint32_t	reserved;
215 	uint64_t	val;
216 };
217 
218 static struct msr_entry msr_load_list[1] __aligned(16);
219 
220 /*
221  * The definitions of SDT probes for VMX.
222  */
223 
224 /* BEGIN CSTYLED */
225 SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
226     "struct vmx *", "int", "struct vm_exit *");
227 
228 SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
229     "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");
230 
231 SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
232     "struct vmx *", "int", "struct vm_exit *", "uint64_t");
233 
234 SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
235     "struct vmx *", "int", "struct vm_exit *", "uint32_t");
236 
237 SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
238     "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");
239 
240 SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
241     "struct vmx *", "int", "struct vm_exit *");
242 
243 SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
244     "struct vmx *", "int", "struct vm_exit *");
245 
246 SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
247     "struct vmx *", "int", "struct vm_exit *");
248 
249 SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
250     "struct vmx *", "int", "struct vm_exit *");
251 
252 SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
253     "struct vmx *", "int", "struct vm_exit *", "uint32_t");
254 
255 SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
256     "struct vmx *", "int", "struct vm_exit *");
257 
258 SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
259     "struct vmx *", "int", "struct vm_exit *");
260 
261 SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
262     "struct vmx *", "int", "struct vm_exit *");
263 
264 SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
265     "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");
266 
267 SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
268     "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");
269 
270 SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
271     "struct vmx *", "int", "struct vm_exit *", "uint64_t");
272 
273 SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
274     "struct vmx *", "int", "struct vm_exit *");
275 
276 SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
277     "struct vmx *", "int", "struct vm_exit *");
278 
279 SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
280     "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");
281 
282 SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
283     "struct vmx *", "int", "struct vm_exit *");
284 
285 SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
286     "struct vmx *", "int", "struct vm_exit *");
287 
288 SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
289     "struct vmx *", "int", "struct vm_exit *");
290 
291 SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
292     "struct vmx *", "int", "struct vm_exit *");
293 
294 SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
295     "struct vmx *", "int", "struct vm_exit *", "uint32_t");
296 
297 SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
298     "struct vmx *", "int", "struct vm_exit *", "int");
299 /* END CSTYLED */
300 
301 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
302 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
303 static void vmx_apply_tsc_adjust(struct vmx *, int);
304 static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
305 static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
306 static void vmx_tpr_shadow_exit(struct vlapic *vlapic);
307 
308 static void
309 vmx_allow_x2apic_msrs(struct vmx *vmx, int vcpuid)
310 {
311 	/*
312 	 * Allow readonly access to the following x2APIC MSRs from the guest.
313 	 */
314 	guest_msr_ro(vmx, vcpuid, MSR_APIC_ID);
315 	guest_msr_ro(vmx, vcpuid, MSR_APIC_VERSION);
316 	guest_msr_ro(vmx, vcpuid, MSR_APIC_LDR);
317 	guest_msr_ro(vmx, vcpuid, MSR_APIC_SVR);
318 
319 	for (uint_t i = 0; i < 8; i++) {
320 		guest_msr_ro(vmx, vcpuid, MSR_APIC_ISR0 + i);
321 		guest_msr_ro(vmx, vcpuid, MSR_APIC_TMR0 + i);
322 		guest_msr_ro(vmx, vcpuid, MSR_APIC_IRR0 + i);
323 	}
324 
325 	guest_msr_ro(vmx, vcpuid, MSR_APIC_ESR);
326 	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_TIMER);
327 	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_THERMAL);
328 	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_PCINT);
329 	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT0);
330 	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_LINT1);
331 	guest_msr_ro(vmx, vcpuid, MSR_APIC_LVT_ERROR);
332 	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR_TIMER);
333 	guest_msr_ro(vmx, vcpuid, MSR_APIC_DCR_TIMER);
334 	guest_msr_ro(vmx, vcpuid, MSR_APIC_ICR);
335 
336 	/*
337 	 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
338 	 *
339 	 * These registers get special treatment described in the section
340 	 * "Virtualizing MSR-Based APIC Accesses".
341 	 */
342 	guest_msr_rw(vmx, vcpuid, MSR_APIC_TPR);
343 	guest_msr_rw(vmx, vcpuid, MSR_APIC_EOI);
344 	guest_msr_rw(vmx, vcpuid, MSR_APIC_SELF_IPI);
345 }
346 
347 static ulong_t
348 vmx_fix_cr0(ulong_t cr0)
349 {
350 	return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
351 }
352 
353 /*
354  * Given a live (VMCS-active) cr0 value, and its shadow counterpart, calculate
355  * the value observable from the guest.
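 *
 * For example, CR0_CD is placed in cr0_zeros_mask during vmx_init(), so the
 * live VMCS value always has CD clear while a guest write of CD is captured
 * in the CR0 read shadow; the guest-observable value therefore takes the
 * bits covered by the masks from the shadow and all other bits from the
 * live register.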
356  */
357 static ulong_t
358 vmx_unshadow_cr0(uint64_t cr0, uint64_t shadow)
359 {
360 	return ((cr0 & ~cr0_ones_mask) |
361 	    (shadow & (cr0_zeros_mask | cr0_ones_mask)));
362 }
363 
364 static ulong_t
365 vmx_fix_cr4(ulong_t cr4)
366 {
367 	return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
368 }
369 
370 /*
371  * Given a live (VMCS-active) cr4 value, and its shadow counterpart, calculate
372  * the value observable from the guest.
373  */
374 static ulong_t
375 vmx_unshadow_cr4(uint64_t cr4, uint64_t shadow)
376 {
377 	return ((cr4 & ~cr4_ones_mask) |
378 	    (shadow & (cr4_zeros_mask | cr4_ones_mask)));
379 }
380 
381 static void
382 vpid_free(int vpid)
383 {
384 	if (vpid < 0 || vpid > 0xffff)
385 		panic("vpid_free: invalid vpid %d", vpid);
386 
387 	/*
388 	 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
389 	 * the unit number allocator.
390 	 */
391 
392 	if (vpid > VM_MAXCPU)
393 		hma_vmx_vpid_free((uint16_t)vpid);
394 }
395 
396 static void
397 vpid_alloc(uint16_t *vpid, int num)
398 {
399 	int i, x;
400 
401 	if (num <= 0 || num > VM_MAXCPU)
402 		panic("invalid number of vpids requested: %d", num);
403 
404 	/*
405 	 * If the "enable vpid" execution control is not enabled then the
406 	 * VPID is required to be 0 for all vcpus.
407 	 */
408 	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
409 		for (i = 0; i < num; i++)
410 			vpid[i] = 0;
411 		return;
412 	}
413 
414 	/*
415 	 * Allocate a unique VPID for each vcpu from the unit number allocator.
416 	 */
417 	for (i = 0; i < num; i++) {
418 		uint16_t tmp;
419 
420 		tmp = hma_vmx_vpid_alloc();
421 		x = (tmp == 0) ? -1 : tmp;
422 
423 		if (x == -1)
424 			break;
425 		else
426 			vpid[i] = x;
427 	}
428 
429 	if (i < num) {
430 		atomic_add_int(&vpid_alloc_failed, 1);
431 
432 		/*
433 		 * If the unit number allocator does not have enough unique
434 		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
435 		 *
436 		 * These VPIDs are not unique across VMs, but this does not
437 		 * affect correctness because the combined mappings are also
438 		 * tagged with the EP4TA, which is unique for each VM.
439 		 *
440 		 * It is still sub-optimal because the invvpid will invalidate
441 		 * combined mappings for a particular VPID across all EP4TAs.
442 		 */
443 		while (i-- > 0)
444 			vpid_free(vpid[i]);
445 
446 		for (i = 0; i < num; i++)
447 			vpid[i] = i + 1;
448 	}
449 }
450 
451 static int
452 vmx_cleanup(void)
453 {
454 	/* This is taken care of by the hma registration */
455 	return (0);
456 }
457 
458 static void
459 vmx_restore(void)
460 {
461 	/* No-op on illumos */
462 }
463 
464 static int
465 vmx_init(void)
466 {
467 	int error;
468 	uint64_t fixed0, fixed1;
469 	uint32_t tmp;
470 	enum vmx_caps avail_caps = VMX_CAP_NONE;
471 
472 	/* Check support for primary processor-based VM-execution controls */
473 	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
474 	    MSR_VMX_TRUE_PROCBASED_CTLS,
475 	    PROCBASED_CTLS_ONE_SETTING,
476 	    PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
477 	if (error) {
478 		printf("vmx_init: processor does not support desired primary "
479 		    "processor-based controls\n");
480 		return (error);
481 	}
482 
483 	/*
484 	 * Clear interrupt-window/NMI-window exiting from the default proc-based
485 	 * controls. They are set and cleared based on runtime vCPU events.
486 	 */
487 	procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
488 
489 	/* Check support for secondary processor-based VM-execution controls */
490 	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
491 	    MSR_VMX_PROCBASED_CTLS2,
492 	    PROCBASED_CTLS2_ONE_SETTING,
493 	    PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
494 	if (error) {
495 		printf("vmx_init: processor does not support desired secondary "
496 		    "processor-based controls\n");
497 		return (error);
498 	}
499 
500 	/* Check support for VPID */
501 	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
502 	    MSR_VMX_PROCBASED_CTLS2,
503 	    PROCBASED2_ENABLE_VPID,
504 	    0, &tmp);
505 	if (error == 0)
506 		procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
507 
508 	/* Check support for pin-based VM-execution controls */
509 	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
510 	    MSR_VMX_TRUE_PINBASED_CTLS,
511 	    PINBASED_CTLS_ONE_SETTING,
512 	    PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
513 	if (error) {
514 		printf("vmx_init: processor does not support desired "
515 		    "pin-based controls\n");
516 		return (error);
517 	}
518 
519 	/* Check support for VM-exit controls */
520 	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
521 	    VM_EXIT_CTLS_ONE_SETTING,
522 	    VM_EXIT_CTLS_ZERO_SETTING,
523 	    &exit_ctls);
524 	if (error) {
525 		printf("vmx_init: processor does not support desired "
526 		    "exit controls\n");
527 		return (error);
528 	}
529 
530 	/* Check support for VM-entry controls */
531 	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
532 	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
533 	    &entry_ctls);
534 	if (error) {
535 		printf("vmx_init: processor does not support desired "
536 		    "entry controls\n");
537 		return (error);
538 	}
539 
540 	/*
541 	 * Check support for optional features by testing them
542 	 * as individual bits
543 	 */
544 	cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
545 	    MSR_VMX_PROCBASED_CTLS,
546 	    PROCBASED_MTF, 0,
547 	    &tmp) == 0);
548 
549 	cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
550 	    MSR_VMX_TRUE_PROCBASED_CTLS,
551 	    PROCBASED_PAUSE_EXITING, 0,
552 	    &tmp) == 0);
553 
554 	cap_wbinvd_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
555 	    MSR_VMX_PROCBASED_CTLS2,
556 	    PROCBASED2_WBINVD_EXITING, 0,
557 	    &tmp) == 0);
558 
559 	cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
560 	    MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
561 	    &tmp) == 0);
562 
563 	/*
564 	 * Check for APIC virtualization capabilities:
565 	 * - TPR shadowing
566 	 * - Full APICv (with or without x2APIC support)
567 	 * - Posted interrupt handling
568 	 */
569 	if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
570 	    PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
571 		avail_caps |= VMX_CAP_TPR_SHADOW;
572 
573 		const uint32_t apicv_bits =
574 		    PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
575 		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
576 		    PROCBASED2_VIRTUALIZE_X2APIC_MODE |
577 		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
578 		if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
579 		    MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
580 			avail_caps |= VMX_CAP_APICV;
581 
582 			/*
583 			 * It may make sense in the future to differentiate
584 			 * hardware (or software) configurations with APICv but
585 			 * no support for accelerating x2APIC mode.
586 			 */
587 			avail_caps |= VMX_CAP_APICV_X2APIC;
588 
589 			error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
590 			    MSR_VMX_TRUE_PINBASED_CTLS,
591 			    PINBASED_POSTED_INTERRUPT, 0, &tmp);
592 			if (error == 0) {
593 				/*
594 				 * If the PSM-provided interfaces for requesting
595 				 * and using a PIR IPI vector are present, use
596 				 * them for posted interrupts.
597 				 */
598 				if (psm_get_pir_ipivect != NULL &&
599 				    psm_send_pir_ipi != NULL) {
600 					pirvec = psm_get_pir_ipivect();
601 					avail_caps |= VMX_CAP_APICV_PIR;
602 				}
603 			}
604 		}
605 	}
606 
607 	/*
608 	 * Check for necessary EPT capabilities
609 	 *
610 	 * TODO: Properly handle when IA32_VMX_EPT_VPID_HW_AD is missing and the
611 	 * hypervisor intends to utilize dirty page tracking.
612 	 */
613 	uint64_t ept_caps = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
614 	if ((ept_caps & EPT_CAPS_REQUIRED) != EPT_CAPS_REQUIRED) {
615 		cmn_err(CE_WARN, "!Inadequate EPT capabilities: %lx", ept_caps);
616 		return (EINVAL);
617 	}
618 
619 #ifdef __FreeBSD__
620 	guest_l1d_flush = (cpu_ia32_arch_caps &
621 	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
622 	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
623 
624 	/*
625 	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
626 	 * available.  Otherwise fall back to the software flush
627 	 * method which loads enough data from the kernel text to
628 	 * flush existing L1D content, both on VMX entry and on NMI
629 	 * return.
630 	 */
631 	if (guest_l1d_flush) {
632 		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
633 			guest_l1d_flush_sw = 1;
634 			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
635 			    &guest_l1d_flush_sw);
636 		}
637 		if (guest_l1d_flush_sw) {
638 			if (nmi_flush_l1d_sw <= 1)
639 				nmi_flush_l1d_sw = 1;
640 		} else {
641 			msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
642 			msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
643 		}
644 	}
645 #else
646 	/* L1D flushing is taken care of by smt_acquire() and friends */
647 	guest_l1d_flush = 0;
648 #endif /* __FreeBSD__ */
649 
650 	/*
651 	 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
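	 * (a bit set in both of the FIXED0/FIXED1 MSRs must be 1, a bit clear
	 * in both must be 0, and a bit clear in FIXED0 but set in FIXED1 is
	 * left under guest control)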
652 	 */
653 	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
654 	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
655 	cr0_ones_mask = fixed0 & fixed1;
656 	cr0_zeros_mask = ~fixed0 & ~fixed1;
657 
658 	/*
659 	 * Since Unrestricted Guest was already verified present, CR0_PE and
660 	 * CR0_PG are allowed to be set to zero in VMX non-root operation
661 	 */
662 	cr0_ones_mask &= ~(CR0_PG | CR0_PE);
663 
664 	/*
665 	 * Do not allow the guest to set CR0_NW or CR0_CD.
666 	 */
667 	cr0_zeros_mask |= (CR0_NW | CR0_CD);
668 
669 	fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
670 	fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
671 	cr4_ones_mask = fixed0 & fixed1;
672 	cr4_zeros_mask = ~fixed0 & ~fixed1;
673 
674 	vmx_msr_init();
675 
676 	vmx_capabilities = avail_caps;
677 	vmx_initialized = 1;
678 
679 	return (0);
680 }
681 
682 static void
683 vmx_trigger_hostintr(int vector)
684 {
685 	VERIFY(vector >= 32 && vector <= 255);
686 	vmx_call_isr(vector - 32);
687 }
688 
689 static void *
690 vmx_vminit(struct vm *vm)
691 {
692 	uint16_t vpid[VM_MAXCPU];
693 	int i, error, datasel;
694 	struct vmx *vmx;
695 	uint32_t exc_bitmap;
696 	uint16_t maxcpus;
697 	uint32_t proc_ctls, proc2_ctls, pin_ctls;
698 	uint64_t apic_access_pa = UINT64_MAX;
699 
700 	vmx = kmem_zalloc(sizeof (struct vmx), KM_SLEEP);
701 	VERIFY3U((uintptr_t)vmx & PAGE_MASK, ==, 0);
702 
703 	vmx->vm = vm;
704 	vmx->eptp = vmspace_table_root(vm_get_vmspace(vm));
705 
706 	/*
707 	 * Clean up EP4TA-tagged guest-physical and combined mappings
708 	 *
709 	 * VMX transitions are not required to invalidate any guest physical
710 	 * mappings. So, it may be possible for stale guest physical mappings
711 	 * to be present in the processor TLBs.
712 	 *
713 	 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
714 	 */
715 	hma_vmx_invept_allcpus((uintptr_t)vmx->eptp);
716 
717 	vmx_msr_bitmap_initialize(vmx);
718 
719 	vpid_alloc(vpid, VM_MAXCPU);
720 
721 	/* Grab the established defaults */
722 	proc_ctls = procbased_ctls;
723 	proc2_ctls = procbased_ctls2;
724 	pin_ctls = pinbased_ctls;
725 	/* For now, default to the available capabilities */
726 	vmx->vmx_caps = vmx_capabilities;
727 
728 	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
729 		proc_ctls |= PROCBASED_USE_TPR_SHADOW;
730 		proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
731 		proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
732 	}
733 	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
734 		ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));
735 
736 		proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
737 		    PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
738 		    PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
739 
740 		/*
741 		 * Allocate a page of memory to back the APIC access address for
742 		 * when APICv features are in use.  Guest MMIO accesses should
743 		 * never actually reach this page, but rather be intercepted.
744 		 */
745 		vmx->apic_access_page = kmem_zalloc(PAGESIZE, KM_SLEEP);
746 		VERIFY3U((uintptr_t)vmx->apic_access_page & PAGEOFFSET, ==, 0);
747 		apic_access_pa = vtophys(vmx->apic_access_page);
748 
749 		error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
750 		    apic_access_pa);
751 		/* XXX this should really return an error to the caller */
752 		KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
753 	}
754 	if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
755 		ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));
756 
757 		pin_ctls |= PINBASED_POSTED_INTERRUPT;
758 	}
759 
760 	/* Reflect any enabled defaults in the cap set */
761 	int cap_defaults = 0;
762 	if ((proc_ctls & PROCBASED_HLT_EXITING) != 0) {
763 		cap_defaults |= (1 << VM_CAP_HALT_EXIT);
764 	}
765 	if ((proc_ctls & PROCBASED_PAUSE_EXITING) != 0) {
766 		cap_defaults |= (1 << VM_CAP_PAUSE_EXIT);
767 	}
768 	if ((proc_ctls & PROCBASED_MTF) != 0) {
769 		cap_defaults |= (1 << VM_CAP_MTRAP_EXIT);
770 	}
771 	if ((proc2_ctls & PROCBASED2_ENABLE_INVPCID) != 0) {
772 		cap_defaults |= (1 << VM_CAP_ENABLE_INVPCID);
773 	}
774 
775 	maxcpus = vm_get_maxcpus(vm);
776 	datasel = vmm_get_host_datasel();
777 	for (i = 0; i < maxcpus; i++) {
778 		/*
779 		 * Cache physical address lookups for various components which
780 		 * may be required inside the critical_enter() section implied
781 		 * by VMPTRLD() below.
782 		 */
783 		vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap[i]);
784 		vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
785 		vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);
786 
787 		vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
788 		vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);
789 
790 		vmx_msr_guest_init(vmx, i);
791 
792 		vmcs_load(vmx->vmcs_pa[i]);
793 
794 		vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
795 		vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());
796 
797 		/* Load the control registers */
798 		vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
799 		vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);
800 
801 		/* Load the segment selectors */
802 		vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());
803 
804 		vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
805 		vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
806 		vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);
807 
808 		vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
809 		vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
810 		vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());
811 
812 		/*
813 		 * Configure host sysenter MSRs to be restored on VM exit.
814 		 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
815 		 * vmx_run.
816 		 */
817 		vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
818 		vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
819 		    rdmsr(MSR_SYSENTER_EIP_MSR));
820 
821 		/* instruction pointer */
822 		vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
823 
824 		/* link pointer */
825 		vmcs_write(VMCS_LINK_POINTER, ~0);
826 
827 		vmcs_write(VMCS_EPTP, vmx->eptp);
828 		vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
829 		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
830 
831 		uint32_t use_proc2_ctls = proc2_ctls;
832 		if (cap_wbinvd_exit && vcpu_trap_wbinvd(vm, i) != 0)
833 			use_proc2_ctls |= PROCBASED2_WBINVD_EXITING;
834 		vmcs_write(VMCS_SEC_PROC_BASED_CTLS, use_proc2_ctls);
835 
836 		vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
837 		vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
838 		vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
839 		vmcs_write(VMCS_VPID, vpid[i]);
840 
841 		if (guest_l1d_flush && !guest_l1d_flush_sw) {
842 			vmcs_write(VMCS_ENTRY_MSR_LOAD,
843 			    vtophys(&msr_load_list[0]));
844 			vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
845 			    nitems(msr_load_list));
846 			vmcs_write(VMCS_EXIT_MSR_STORE, 0);
847 			vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
848 		}
849 
850 		/* exception bitmap */
851 		if (vcpu_trace_exceptions(vm, i))
852 			exc_bitmap = 0xffffffff;
853 		else
854 			exc_bitmap = 1 << IDT_MC;
855 		vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);
856 
857 		vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
858 		vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);
859 
860 		if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
861 			vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
862 		}
863 
864 		if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
865 			vmcs_write(VMCS_APIC_ACCESS, apic_access_pa);
866 			vmcs_write(VMCS_EOI_EXIT0, 0);
867 			vmcs_write(VMCS_EOI_EXIT1, 0);
868 			vmcs_write(VMCS_EOI_EXIT2, 0);
869 			vmcs_write(VMCS_EOI_EXIT3, 0);
870 		}
871 		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
872 			vmcs_write(VMCS_PIR_VECTOR, pirvec);
873 			vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
874 		}
875 
876 		/*
877 		 * Set up the CR0/4 masks and configure the read shadow state
878 		 * to the power-on register value from the Intel Sys Arch.
879 		 *  CR0 - 0x60000010
880 		 *  CR4 - 0
881 		 */
882 		vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
883 		vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
884 		vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
885 		vmcs_write(VMCS_CR4_SHADOW, 0);
886 
887 		vmcs_clear(vmx->vmcs_pa[i]);
888 
889 		vmx->cap[i].set = cap_defaults;
890 		vmx->cap[i].proc_ctls = proc_ctls;
891 		vmx->cap[i].proc_ctls2 = proc2_ctls;
892 		vmx->cap[i].exc_bitmap = exc_bitmap;
893 
894 		vmx->state[i].nextrip = ~0;
895 		vmx->state[i].lastcpu = NOCPU;
896 		vmx->state[i].vpid = vpid[i];
897 	}
898 
899 	return (vmx);
900 }
901 
902 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
903 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
904 
905 #define	INVVPID_TYPE_ADDRESS		0UL
906 #define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
907 #define	INVVPID_TYPE_ALL_CONTEXTS	2UL
908 
909 struct invvpid_desc {
910 	uint16_t	vpid;
911 	uint16_t	_res1;
912 	uint32_t	_res2;
913 	uint64_t	linear_addr;
914 };
915 CTASSERT(sizeof (struct invvpid_desc) == 16);
916 
917 static __inline void
918 invvpid(uint64_t type, struct invvpid_desc desc)
919 {
920 	int error;
921 
922 	DTRACE_PROBE3(vmx__invvpid, uint64_t, type, uint16_t, desc.vpid,
923 	    uint64_t, desc.linear_addr);
924 
925 	__asm __volatile("invvpid %[desc], %[type];"
926 	    VMX_SET_ERROR_CODE_ASM
927 	    : [error] "=r" (error)
928 	    : [desc] "m" (desc), [type] "r" (type)
929 	    : "memory");
930 
931 	if (error) {
932 		panic("invvpid error %d", error);
933 	}
934 }
935 
936 /*
937  * Invalidate guest mappings identified by its VPID from the TLB.
938  *
939  * This is effectively a flush of the guest TLB, removing only "combined
940  * mappings" (to use the VMX parlance).  Actions which modify the EPT structures
941  * for the instance (such as unmapping GPAs) would require an 'invept' flush.
942  */
943 static void
944 vmx_invvpid(struct vmx *vmx, int vcpu, int running)
945 {
946 	struct vmxstate *vmxstate;
947 	struct vmspace *vms;
948 
949 	vmxstate = &vmx->state[vcpu];
950 	if (vmxstate->vpid == 0) {
951 		return;
952 	}
953 
954 	if (!running) {
955 		/*
956 		 * Set the 'lastcpu' to an invalid host cpu.
957 		 *
958 		 * This will invalidate TLB entries tagged with the vcpu's
959 		 * vpid the next time it runs via vmx_set_pcpu_defaults().
960 		 */
961 		vmxstate->lastcpu = NOCPU;
962 		return;
963 	}
964 
965 	/*
966 	 * Invalidate all mappings tagged with 'vpid'
967 	 *
968 	 * This is done when a vCPU moves between host CPUs, where there may be
969 	 * stale TLB entries for this VPID on the target, or if emulated actions
970 	 * in the guest CPU have incurred an explicit TLB flush.
971 	 */
972 	vms = vm_get_vmspace(vmx->vm);
973 	if (vmspace_table_gen(vms) == vmx->eptgen[curcpu]) {
974 		struct invvpid_desc invvpid_desc = {
975 			.vpid = vmxstate->vpid,
976 			.linear_addr = 0,
977 			._res1 = 0,
978 			._res2 = 0,
979 		};
980 
981 		invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
982 		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
983 	} else {
984 		/*
985 		 * The INVVPID can be skipped if an INVEPT is going to be
986 		 * performed before entering the guest.  The INVEPT will
987 		 * invalidate combined mappings for the EP4TA associated with
988 		 * this guest, in all VPIDs.
989 		 */
990 		vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
991 	}
992 }
993 
994 static __inline void
995 invept(uint64_t type, uint64_t eptp)
996 {
997 	int error;
998 	struct invept_desc {
999 		uint64_t eptp;
1000 		uint64_t _resv;
1001 	} desc = { eptp, 0 };
1002 
1003 	DTRACE_PROBE2(vmx__invept, uint64_t, type, uint64_t, eptp);
1004 
1005 	__asm __volatile("invept %[desc], %[type];"
1006 	    VMX_SET_ERROR_CODE_ASM
1007 	    : [error] "=r" (error)
1008 	    : [desc] "m" (desc), [type] "r" (type)
1009 	    : "memory");
1010 
1011 	if (error != 0) {
1012 		panic("invept error %d", error);
1013 	}
1014 }
1015 
1016 static void
1017 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
1018 {
1019 	struct vmxstate *vmxstate;
1020 
1021 	/*
1022 	 * Regardless of whether the VM appears to have migrated between CPUs,
1023 	 * save the host sysenter stack pointer.  As it points to the kernel
1024 	 * stack of each thread, the correct value must be maintained for every
1025 	 * trip into the critical section.
1026 	 */
1027 	vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));
1028 
1029 	/*
1030 	 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
1031 	 * migration between host CPUs with differing TSC values.
1032 	 */
1033 	vmx_apply_tsc_adjust(vmx, vcpu);
1034 
1035 	vmxstate = &vmx->state[vcpu];
1036 	if (vmxstate->lastcpu == curcpu)
1037 		return;
1038 
1039 	vmxstate->lastcpu = curcpu;
1040 
1041 	vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1042 
1043 	/* Load the per-CPU IDT address */
1044 	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
1045 	vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1046 	vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1047 	vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1048 	vmx_invvpid(vmx, vcpu, 1);
1049 }
1050 
1051 static __inline bool
1052 vmx_int_window_exiting(struct vmx *vmx, int vcpu)
1053 {
1054 	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0);
1055 }
1056 
1057 static __inline void
1058 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1059 {
1060 	if (!vmx_int_window_exiting(vmx, vcpu)) {
1061 		/* Enable interrupt window exiting */
1062 		vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1063 		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1064 	}
1065 }
1066 
1067 static __inline void
1068 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1069 {
1070 	/* Disable interrupt window exiting */
1071 	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1072 	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1073 }
1074 
1075 static __inline bool
1076 vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
1077 {
1078 	return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
1079 }
1080 
1081 static __inline void
1082 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1083 {
1084 	if (!vmx_nmi_window_exiting(vmx, vcpu)) {
1085 		vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1086 		vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1087 	}
1088 }
1089 
1090 static __inline void
1091 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1092 {
1093 	vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1094 	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1095 }
1096 
1097 /*
1098  * Set the TSC adjustment, taking into account the offsets measured between
1099  * host physical CPUs.  This is required even if the guest has not set a TSC
1100  * offset, since each vCPU inherits the TSC offset of whatever physical CPU it
1101  * has migrated onto.  Without this mitigation, un-synched host TSCs will convey
1102  * the appearance of TSC time-travel to the guest as its vCPUs migrate.
1103  */
1104 static void
1105 vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
1106 {
1107 	const uint64_t offset = vcpu_tsc_offset(vmx->vm, vcpu, true);
1108 
1109 	ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);
1110 
1111 	if (vmx->tsc_offset_active[vcpu] != offset) {
1112 		vmcs_write(VMCS_TSC_OFFSET, offset);
1113 		vmx->tsc_offset_active[vcpu] = offset;
1114 	}
1115 }
1116 
1117 CTASSERT(VMCS_INTR_T_HWINTR		== VM_INTINFO_HWINTR);
1118 CTASSERT(VMCS_INTR_T_NMI		== VM_INTINFO_NMI);
1119 CTASSERT(VMCS_INTR_T_HWEXCEPTION	== VM_INTINFO_HWEXCP);
1120 CTASSERT(VMCS_INTR_T_SWINTR		== VM_INTINFO_SWINTR);
1121 CTASSERT(VMCS_INTR_T_PRIV_SWEXCEPTION	== VM_INTINFO_RESV5);
1122 CTASSERT(VMCS_INTR_T_SWEXCEPTION	== VM_INTINFO_RESV6);
1123 CTASSERT(VMCS_IDT_VEC_ERRCODE_VALID	== VM_INTINFO_DEL_ERRCODE);
1124 CTASSERT(VMCS_INTR_T_MASK		== VM_INTINFO_MASK_TYPE);
1125 
1126 static uint64_t
1127 vmx_idtvec_to_intinfo(uint32_t info, uint32_t errcode)
1128 {
1129 	ASSERT(info & VMCS_IDT_VEC_VALID);
1130 
1131 	const uint32_t type = info & VMCS_INTR_T_MASK;
1132 	const uint8_t vec = info & 0xff;
1133 
1134 	switch (type) {
1135 	case VMCS_INTR_T_HWINTR:
1136 	case VMCS_INTR_T_NMI:
1137 	case VMCS_INTR_T_HWEXCEPTION:
1138 	case VMCS_INTR_T_SWINTR:
1139 	case VMCS_INTR_T_PRIV_SWEXCEPTION:
1140 	case VMCS_INTR_T_SWEXCEPTION:
1141 		break;
1142 	default:
1143 		panic("unexpected event type 0x%03x", type);
1144 	}
1145 
1146 	uint64_t intinfo = VM_INTINFO_VALID | type | vec;
1147 	if (info & VMCS_IDT_VEC_ERRCODE_VALID) {
1148 		intinfo |= (uint64_t)errcode << 32;
1149 	}
1150 
1151 	return (intinfo);
1152 }
1153 
1154 CTASSERT(VMCS_INTR_DEL_ERRCODE		== VMCS_IDT_VEC_ERRCODE_VALID);
1155 CTASSERT(VMCS_INTR_VALID		== VMCS_IDT_VEC_VALID);
1156 
1157 /*
1158  * Store VMX-specific event injection info for later handling.  This depends on
1159  * the bhyve-internal event definitions matching those in the VMCS, as ensured
1160  * by vmx_idtvec_to_intinfo() and the related CTASSERTs.
1161  */
1162 static void
1163 vmx_stash_intinfo(struct vmx *vmx, int vcpu)
1164 {
1165 	uint64_t info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1166 	if ((info & VMCS_INTR_VALID) != 0) {
1167 		uint32_t errcode = 0;
1168 
1169 		if ((info & VMCS_INTR_DEL_ERRCODE) != 0) {
1170 			errcode = vmcs_read(VMCS_ENTRY_EXCEPTION_ERROR);
1171 		}
1172 
1173 		VERIFY0(vm_exit_intinfo(vmx->vm, vcpu,
1174 		    vmx_idtvec_to_intinfo(info, errcode)));
1175 
1176 		vmcs_write(VMCS_ENTRY_INTR_INFO, 0);
1177 		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, 0);
1178 	}
1179 }
1180 
1181 static void
1182 vmx_inject_intinfo(uint64_t info)
1183 {
1184 	ASSERT(VM_INTINFO_PENDING(info));
1185 	ASSERT0(info & VM_INTINFO_MASK_RSVD);
1186 
1187 	/*
1188 	 * The bhyve format matches that of the VMCS, which is ensured by the
1189 	 * CTASSERTs above.
1190 	 */
1191 	uint32_t inject = info;
1192 	switch (VM_INTINFO_VECTOR(info)) {
1193 	case IDT_BP:
1194 	case IDT_OF:
1195 		/*
1196 		 * VT-x requires #BP and #OF to be injected as software
1197 		 * exceptions.
1198 		 */
1199 		inject &= ~VMCS_INTR_T_MASK;
1200 		inject |= VMCS_INTR_T_SWEXCEPTION;
1201 		break;
1202 	default:
1203 		break;
1204 	}
1205 
1206 	if (VM_INTINFO_HAS_ERRCODE(info)) {
1207 		vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
1208 		    VM_INTINFO_ERRCODE(info));
1209 	}
1210 	vmcs_write(VMCS_ENTRY_INTR_INFO, inject);
1211 }
1212 
1213 #define	NMI_BLOCKING	(VMCS_INTERRUPTIBILITY_NMI_BLOCKING |		\
1214 			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1215 #define	HWINTR_BLOCKING	(VMCS_INTERRUPTIBILITY_STI_BLOCKING |		\
1216 			VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1217 
1218 static void
1219 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1220 {
1221 	ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
1222 	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);
1223 
1224 	/*
1225 	 * Inject the virtual NMI. The vector must be the NMI IDT entry
1226 	 * or the VMCS entry check will fail.
1227 	 */
1228 	vmcs_write(VMCS_ENTRY_INTR_INFO,
1229 	    IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);
1230 
1231 	/* Clear the request */
1232 	vm_nmi_clear(vmx->vm, vcpu);
1233 }
1234 
1235 /*
1236  * Inject exceptions, NMIs, and ExtINTs.
1237  *
1238  * The logic behind these are complicated and may involve mutex contention, so
1239  * The logic behind these is complicated and may involve mutex contention, so
1240  * being disabled.  This means a racing notification could be "lost",
1241  * necessitating a later call to vmx_inject_recheck() to close that window
1242  * of opportunity.
1243  */
1244 static enum event_inject_state
1245 vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
1246 {
1247 	uint64_t entryinfo;
1248 	uint32_t gi, info;
1249 	int vector;
1250 	enum event_inject_state state;
1251 
1252 	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1253 	info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1254 	state = EIS_CAN_INJECT;
1255 
1256 	/* Clear any interrupt blocking if the guest %rip has changed */
1257 	if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
1258 		gi &= ~HWINTR_BLOCKING;
1259 		vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1260 	}
1261 
1262 	/*
1263 	 * It could be that an interrupt is already pending for injection from
1264 	 * the VMCS.  This would be the case if the vCPU exited for conditions
1265 	 * such as an AST before a vm-entry delivered the injection.
1266 	 */
1267 	if ((info & VMCS_INTR_VALID) != 0) {
1268 		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
1269 	}
1270 
1271 	if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1272 		vmx_inject_intinfo(entryinfo);
1273 		state = EIS_EV_INJECTED;
1274 	}
1275 
1276 	if (vm_nmi_pending(vmx->vm, vcpu)) {
1277 		/*
1278 		 * If there are no conditions blocking NMI injection then inject
1279 		 * it directly here; otherwise enable "NMI window exiting" to
1280 		 * inject it as soon as we can.
1281 		 *
1282 		 * According to the Intel manual, some CPUs do not allow NMI
1283 		 * injection when STI_BLOCKING is active.  That check is
1284 		 * enforced here, regardless of CPU capability.  If running on a
1285 		 * CPU without such a restriction it will immediately exit and
1286 		 * the NMI will be injected in the "NMI window exiting" handler.
1287 		 */
1288 		if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1289 			if (state == EIS_CAN_INJECT) {
1290 				vmx_inject_nmi(vmx, vcpu);
1291 				state = EIS_EV_INJECTED;
1292 			} else {
1293 				return (state | EIS_REQ_EXIT);
1294 			}
1295 		} else {
1296 			vmx_set_nmi_window_exiting(vmx, vcpu);
1297 		}
1298 	}
1299 
1300 	if (vm_extint_pending(vmx->vm, vcpu)) {
1301 		if (state != EIS_CAN_INJECT) {
1302 			return (state | EIS_REQ_EXIT);
1303 		}
1304 		if ((gi & HWINTR_BLOCKING) != 0 ||
1305 		    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
1306 			return (EIS_GI_BLOCK);
1307 		}
1308 
1309 		/* Ask the legacy pic for a vector to inject */
1310 		vatpic_pending_intr(vmx->vm, &vector);
1311 
1312 		/*
1313 		 * From the Intel SDM, Volume 3, Section "Maskable
1314 		 * Hardware Interrupts":
1315 		 * - maskable interrupt vectors [0,255] can be delivered
1316 		 *   through the INTR pin.
1317 		 */
1318 		KASSERT(vector >= 0 && vector <= 255,
1319 		    ("invalid vector %d from INTR", vector));
1320 
1321 		/* Inject the interrupt */
1322 		vmcs_write(VMCS_ENTRY_INTR_INFO,
1323 		    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);
1324 
1325 		vm_extint_clear(vmx->vm, vcpu);
1326 		vatpic_intr_accepted(vmx->vm, vector);
1327 		state = EIS_EV_INJECTED;
1328 	}
1329 
1330 	return (state);
1331 }
1332 
1333 /*
1334  * Inject any interrupts pending on the vLAPIC.
1335  *
1336  * This is done with host CPU interrupts disabled so notification IPIs, either
1337  * from the standard vCPU notification or APICv posted interrupts, will be
1338  * queued on the host APIC and recognized when entering VMX context.
1339  */
1340 static enum event_inject_state
1341 vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
1342 {
1343 	int vector;
1344 
1345 	if (!vlapic_pending_intr(vlapic, &vector)) {
1346 		return (EIS_CAN_INJECT);
1347 	}
1348 
1349 	/*
1350 	 * From the Intel SDM, Volume 3, Section "Maskable
1351 	 * Hardware Interrupts":
1352 	 * - maskable interrupt vectors [16,255] can be delivered
1353 	 *   through the local APIC.
1354 	 */
1355 	KASSERT(vector >= 16 && vector <= 255,
1356 	    ("invalid vector %d from local APIC", vector));
1357 
1358 	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
1359 		uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
1360 		uint16_t status_new = (status_old & 0xff00) | vector;
1361 
1362 		/*
1363 		 * The APICv state will have been synced into the vLAPIC
1364 		 * as part of vlapic_pending_intr().  Prepare the VMCS
1365 		 * for the to-be-injected pending interrupt.
1366 		 */
1367 		if (status_new > status_old) {
1368 			vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
1369 		}
1370 
1371 		/*
1372 		 * Ensure VMCS state regarding EOI traps is kept in sync
1373 		 * with the TMRs in the vlapic.
1374 		 */
1375 		vmx_apicv_sync_tmr(vlapic);
1376 
1377 		/*
1378 		 * The rest of the injection process for the pending
1379 		 * interrupt(s) is handled by APICv. It does not preclude other
1380 		 * event injection from occurring.
1381 		 */
1382 		return (EIS_CAN_INJECT);
1383 	}
1384 
1385 	ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);
1386 
1387 	/* Does guest interruptibility block injection? */
1388 	if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
1389 	    (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
1390 		return (EIS_GI_BLOCK);
1391 	}
1392 
1393 	/* Inject the interrupt */
1394 	vmcs_write(VMCS_ENTRY_INTR_INFO,
1395 	    VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);
1396 
1397 	/* Update the Local APIC ISR */
1398 	vlapic_intr_accepted(vlapic, vector);
1399 
1400 	return (EIS_EV_INJECTED);
1401 }
1402 
1403 /*
1404  * Re-check for events to be injected.
1405  *
1406  * Once host CPU interrupts are disabled, check for the presence of any events
1407  * which require injection processing.  If an exit is required upon injection,
1408  * or once the guest becomes interruptible, that will be configured too.
1409  */
1410 static bool
1411 vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
1412 {
1413 	if (state == EIS_CAN_INJECT) {
1414 		if (vm_nmi_pending(vmx->vm, vcpu) &&
1415 		    !vmx_nmi_window_exiting(vmx, vcpu)) {
1416 			/* queued NMI not blocked by NMI-window-exiting */
1417 			return (true);
1418 		}
1419 		if (vm_extint_pending(vmx->vm, vcpu)) {
1420 			/* queued ExtINT not blocked by existing injection */
1421 			return (true);
1422 		}
1423 	} else {
1424 		if ((state & EIS_REQ_EXIT) != 0) {
1425 			/*
1426 			 * Use a self-IPI to force an immediate exit after
1427 			 * event injection has occurred.
1428 			 */
1429 			poke_cpu(CPU->cpu_id);
1430 		} else {
1431 			/*
1432 			 * If any event is being injected, an exit immediately
1433 			 * upon becoming interruptible again will allow pending
1434 			 * or newly queued events to be injected in a timely
1435 			 * manner.
1436 			 */
1437 			vmx_set_int_window_exiting(vmx, vcpu);
1438 		}
1439 	}
1440 	return (false);
1441 }
1442 
1443 /*
1444  * If the Virtual NMIs execution control is '1' then the logical processor
1445  * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1446  * the VMCS. An IRET instruction in VMX non-root operation will remove any
1447  * virtual-NMI blocking.
1448  *
1449  * This unblocking occurs even if the IRET causes a fault. In this case the
1450  * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1451  */
1452 static void
1453 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1454 {
1455 	uint32_t gi;
1456 
1457 	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1458 	gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1459 	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1460 }
1461 
1462 static void
1463 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1464 {
1465 	uint32_t gi;
1466 
1467 	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1468 	gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1469 	vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1470 }
1471 
1472 static void
1473 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1474 {
1475 	uint32_t gi;
1476 
1477 	gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1478 	KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1479 	    ("NMI blocking is not in effect %x", gi));
1480 }
1481 
1482 static int
1483 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1484 {
1485 	struct vmxctx *vmxctx;
1486 	uint64_t xcrval;
1487 	const struct xsave_limits *limits;
1488 
1489 	vmxctx = &vmx->ctx[vcpu];
1490 	limits = vmm_get_xsave_limits();
1491 
1492 	/*
1493 	 * Note that the processor raises a #GP fault on its own if
1494 	 * xsetbv is executed for CPL != 0, so we do not have to
1495 	 * emulate that fault here.
1496 	 */
1497 
1498 	/* Only xcr0 is supported. */
1499 	if (vmxctx->guest_rcx != 0) {
1500 		vm_inject_gp(vmx->vm, vcpu);
1501 		return (HANDLED);
1502 	}
1503 
1504 	/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1505 	if (!limits->xsave_enabled ||
1506 	    !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1507 		vm_inject_ud(vmx->vm, vcpu);
1508 		return (HANDLED);
1509 	}
1510 
1511 	xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1512 	if ((xcrval & ~limits->xcr0_allowed) != 0) {
1513 		vm_inject_gp(vmx->vm, vcpu);
1514 		return (HANDLED);
1515 	}
1516 
1517 	if (!(xcrval & XFEATURE_ENABLED_X87)) {
1518 		vm_inject_gp(vmx->vm, vcpu);
1519 		return (HANDLED);
1520 	}
1521 
1522 	/* AVX (YMM_Hi128) requires SSE. */
1523 	if (xcrval & XFEATURE_ENABLED_AVX &&
1524 	    (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1525 		vm_inject_gp(vmx->vm, vcpu);
1526 		return (HANDLED);
1527 	}
1528 
1529 	/*
1530 	 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1531 	 * ZMM_Hi256, and Hi16_ZMM.
1532 	 */
1533 	if (xcrval & XFEATURE_AVX512 &&
1534 	    (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1535 	    (XFEATURE_AVX512 | XFEATURE_AVX)) {
1536 		vm_inject_gp(vmx->vm, vcpu);
1537 		return (HANDLED);
1538 	}
1539 
1540 	/*
1541 	 * Intel MPX requires both bound register state flags to be
1542 	 * set.
1543 	 */
1544 	if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1545 	    ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1546 		vm_inject_gp(vmx->vm, vcpu);
1547 		return (HANDLED);
1548 	}
1549 
1550 	/*
1551 	 * This runs "inside" vmrun() with the guest's FPU state, so
1552 	 * modifying xcr0 directly modifies the guest's xcr0, not the
1553 	 * host's.
1554 	 */
1555 	load_xcr(0, xcrval);
1556 	return (HANDLED);
1557 }
1558 
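/*
 * The 'ident' values below follow the x86 instruction-encoding register
 * numbers used in the VM-exit qualification (0 = %rax, 1 = %rcx, 2 = %rdx,
 * 3 = %rbx, 4 = %rsp, 5 = %rbp, 6 = %rsi, 7 = %rdi, 8-15 = %r8-%r15).
 */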
1559 static uint64_t
1560 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1561 {
1562 	const struct vmxctx *vmxctx;
1563 
1564 	vmxctx = &vmx->ctx[vcpu];
1565 
1566 	switch (ident) {
1567 	case 0:
1568 		return (vmxctx->guest_rax);
1569 	case 1:
1570 		return (vmxctx->guest_rcx);
1571 	case 2:
1572 		return (vmxctx->guest_rdx);
1573 	case 3:
1574 		return (vmxctx->guest_rbx);
1575 	case 4:
1576 		return (vmcs_read(VMCS_GUEST_RSP));
1577 	case 5:
1578 		return (vmxctx->guest_rbp);
1579 	case 6:
1580 		return (vmxctx->guest_rsi);
1581 	case 7:
1582 		return (vmxctx->guest_rdi);
1583 	case 8:
1584 		return (vmxctx->guest_r8);
1585 	case 9:
1586 		return (vmxctx->guest_r9);
1587 	case 10:
1588 		return (vmxctx->guest_r10);
1589 	case 11:
1590 		return (vmxctx->guest_r11);
1591 	case 12:
1592 		return (vmxctx->guest_r12);
1593 	case 13:
1594 		return (vmxctx->guest_r13);
1595 	case 14:
1596 		return (vmxctx->guest_r14);
1597 	case 15:
1598 		return (vmxctx->guest_r15);
1599 	default:
1600 		panic("invalid vmx register %d", ident);
1601 	}
1602 }
1603 
1604 static void
1605 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1606 {
1607 	struct vmxctx *vmxctx;
1608 
1609 	vmxctx = &vmx->ctx[vcpu];
1610 
1611 	switch (ident) {
1612 	case 0:
1613 		vmxctx->guest_rax = regval;
1614 		break;
1615 	case 1:
1616 		vmxctx->guest_rcx = regval;
1617 		break;
1618 	case 2:
1619 		vmxctx->guest_rdx = regval;
1620 		break;
1621 	case 3:
1622 		vmxctx->guest_rbx = regval;
1623 		break;
1624 	case 4:
1625 		vmcs_write(VMCS_GUEST_RSP, regval);
1626 		break;
1627 	case 5:
1628 		vmxctx->guest_rbp = regval;
1629 		break;
1630 	case 6:
1631 		vmxctx->guest_rsi = regval;
1632 		break;
1633 	case 7:
1634 		vmxctx->guest_rdi = regval;
1635 		break;
1636 	case 8:
1637 		vmxctx->guest_r8 = regval;
1638 		break;
1639 	case 9:
1640 		vmxctx->guest_r9 = regval;
1641 		break;
1642 	case 10:
1643 		vmxctx->guest_r10 = regval;
1644 		break;
1645 	case 11:
1646 		vmxctx->guest_r11 = regval;
1647 		break;
1648 	case 12:
1649 		vmxctx->guest_r12 = regval;
1650 		break;
1651 	case 13:
1652 		vmxctx->guest_r13 = regval;
1653 		break;
1654 	case 14:
1655 		vmxctx->guest_r14 = regval;
1656 		break;
1657 	case 15:
1658 		vmxctx->guest_r15 = regval;
1659 		break;
1660 	default:
1661 		panic("invalid vmx register %d", ident);
1662 	}
1663 }
1664 
1665 static void
1666 vmx_sync_efer_state(struct vmx *vmx, int vcpu, uint64_t efer)
1667 {
1668 	uint64_t ctrl;
1669 
1670 	/*
1671 	 * If the "load EFER" VM-entry control is 1 (which we require) then the
1672 	 * value of EFER.LMA must be identical to the "IA-32e mode guest" bit in the
1673 	 * VM-entry control.
1674 	 */
1675 	ctrl = vmcs_read(VMCS_ENTRY_CTLS);
1676 	if ((efer & EFER_LMA) != 0) {
1677 		ctrl |= VM_ENTRY_GUEST_LMA;
1678 	} else {
1679 		ctrl &= ~VM_ENTRY_GUEST_LMA;
1680 	}
1681 	vmcs_write(VMCS_ENTRY_CTLS, ctrl);
1682 }
1683 
1684 static int
1685 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1686 {
1687 	uint64_t crval, regval;
1688 
1689 	/* We only handle mov to %cr0 at this time */
1690 	if ((exitqual & 0xf0) != 0x00)
1691 		return (UNHANDLED);
1692 
1693 	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1694 
1695 	vmcs_write(VMCS_CR0_SHADOW, regval);
1696 
1697 	crval = regval | cr0_ones_mask;
1698 	crval &= ~cr0_zeros_mask;
1699 
1700 	const uint64_t old = vmcs_read(VMCS_GUEST_CR0);
1701 	const uint64_t diff = crval ^ old;
1702 	/* Flush the TLB if the paging or write-protect bits are changing */
1703 	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
1704 		vmx_invvpid(vmx, vcpu, 1);
1705 	}
1706 
1707 	vmcs_write(VMCS_GUEST_CR0, crval);
1708 
1709 	if (regval & CR0_PG) {
1710 		uint64_t efer;
1711 
1712 		/* Keep EFER.LMA properly updated if paging is enabled */
1713 		efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1714 		if (efer & EFER_LME) {
1715 			efer |= EFER_LMA;
1716 			vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1717 			vmx_sync_efer_state(vmx, vcpu, efer);
1718 		}
1719 	}
1720 
1721 	return (HANDLED);
1722 }
1723 
1724 static int
1725 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1726 {
1727 	uint64_t crval, regval;
1728 
1729 	/* We only handle mov to %cr4 at this time */
1730 	if ((exitqual & 0xf0) != 0x00)
1731 		return (UNHANDLED);
1732 
1733 	regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1734 
1735 	vmcs_write(VMCS_CR4_SHADOW, regval);
1736 
1737 	crval = regval | cr4_ones_mask;
1738 	crval &= ~cr4_zeros_mask;
1739 	vmcs_write(VMCS_GUEST_CR4, crval);
1740 
1741 	return (HANDLED);
1742 }
1743 
1744 static int
1745 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1746 {
1747 	struct vlapic *vlapic;
1748 	uint64_t cr8;
1749 	int regnum;
1750 
1751 	/* We only handle mov %cr8 to/from a register at this time. */
1752 	if ((exitqual & 0xe0) != 0x00) {
1753 		return (UNHANDLED);
1754 	}
1755 
1756 	vlapic = vm_lapic(vmx->vm, vcpu);
1757 	regnum = (exitqual >> 8) & 0xf;
1758 	if (exitqual & 0x10) {
1759 		cr8 = vlapic_get_cr8(vlapic);
1760 		vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1761 	} else {
1762 		cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1763 		vlapic_set_cr8(vlapic, cr8);
1764 	}
1765 
1766 	return (HANDLED);
1767 }
1768 
1769 /*
1770  * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1771  */
1772 static int
1773 vmx_cpl(void)
1774 {
1775 	uint32_t ssar;
1776 
1777 	ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
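	/* DPL occupies bits 6:5 of the segment access-rights field */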
1778 	return ((ssar >> 5) & 0x3);
1779 }
1780 
1781 static enum vm_cpu_mode
1782 vmx_cpu_mode(void)
1783 {
1784 	uint32_t csar;
1785 
1786 	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1787 		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1788 		if (csar & 0x2000)
1789 			return (CPU_MODE_64BIT);	/* CS.L = 1 */
1790 		else
1791 			return (CPU_MODE_COMPATIBILITY);
1792 	} else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1793 		return (CPU_MODE_PROTECTED);
1794 	} else {
1795 		return (CPU_MODE_REAL);
1796 	}
1797 }
1798 
1799 static enum vm_paging_mode
1800 vmx_paging_mode(void)
1801 {
1802 
1803 	if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1804 		return (PAGING_MODE_FLAT);
1805 	if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1806 		return (PAGING_MODE_32);
1807 	if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1808 		return (PAGING_MODE_64);
1809 	else
1810 		return (PAGING_MODE_PAE);
1811 }
1812 
1813 static void
1814 vmx_paging_info(struct vm_guest_paging *paging)
1815 {
1816 	paging->cr3 = vmcs_read(VMCS_GUEST_CR3);
1817 	paging->cpl = vmx_cpl();
1818 	paging->cpu_mode = vmx_cpu_mode();
1819 	paging->paging_mode = vmx_paging_mode();
1820 }
1821 
1822 static void
1823 vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
1824     uint64_t gla)
1825 {
1826 	struct vm_guest_paging paging;
1827 	uint32_t csar;
1828 
1829 	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
1830 	vmexit->inst_length = 0;
1831 	vmexit->u.mmio_emul.gpa = gpa;
1832 	vmexit->u.mmio_emul.gla = gla;
1833 	vmx_paging_info(&paging);
1834 
1835 	switch (paging.cpu_mode) {
1836 	case CPU_MODE_REAL:
1837 		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1838 		vmexit->u.mmio_emul.cs_d = 0;
1839 		break;
1840 	case CPU_MODE_PROTECTED:
1841 	case CPU_MODE_COMPATIBILITY:
1842 		vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1843 		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1844 		vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
1845 		break;
1846 	default:
1847 		vmexit->u.mmio_emul.cs_base = 0;
1848 		vmexit->u.mmio_emul.cs_d = 0;
1849 		break;
1850 	}
1851 
1852 	vie_init_mmio(vie, NULL, 0, &paging, gpa);
1853 }
1854 
1855 static void
1856 vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
1857     uint32_t eax)
1858 {
1859 	struct vm_guest_paging paging;
1860 	struct vm_inout *inout;
1861 
1862 	inout = &vmexit->u.inout;
1863 
1864 	inout->bytes = (qual & 0x7) + 1;
1865 	inout->flags = 0;
1866 	inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
1867 	inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
1868 	inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
1869 	inout->port = (uint16_t)(qual >> 16);
1870 	inout->eax = eax;
1871 	if (inout->flags & INOUT_STR) {
1872 		uint64_t inst_info;
1873 
1874 		inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
1875 
1876 		/*
1877 		 * According to the SDM, bits 9:7 encode the address size of the
1878 		 * ins/outs operation, but only values 0/1/2 are expected,
1879 		 * corresponding to 16/32/64 bit sizes.
1880 		 */
1881 		inout->addrsize = 2 << BITX(inst_info, 9, 7);
1882 		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
1883 		    inout->addrsize == 8);
1884 
1885 		if (inout->flags & INOUT_IN) {
1886 			/*
1887 			 * The bits describing the segment in INSTRUCTION_INFO
1888 			 * are not defined for ins, leaving it to system
1889 			 * software to assume %es (encoded as 0).
1890 			 */
1891 			inout->segment = 0;
1892 		} else {
1893 			/*
1894 			 * Bits 15-17 encode the segment for OUTS.
1895 			 * This value follows the standard x86 segment order.
1896 			 */
1897 			inout->segment = (inst_info >> 15) & 0x7;
1898 		}
1899 	}
1900 
1901 	vmexit->exitcode = VM_EXITCODE_INOUT;
1902 	vmx_paging_info(&paging);
1903 	vie_init_inout(vie, inout, vmexit->inst_length, &paging);
1904 
1905 	/* The in/out emulation will handle advancing %rip */
1906 	vmexit->inst_length = 0;
1907 }
1908 
1909 static int
1910 ept_fault_type(uint64_t ept_qual)
1911 {
1912 	int fault_type;
1913 
1914 	if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1915 		fault_type = PROT_WRITE;
1916 	else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1917 		fault_type = PROT_EXEC;
1918 	else
1919 		fault_type = PROT_READ;
1920 
1921 	return (fault_type);
1922 }
1923 
1924 static bool
1925 ept_emulation_fault(uint64_t ept_qual)
1926 {
1927 	int read, write;
1928 
1929 	/* EPT fault on an instruction fetch doesn't make sense here */
1930 	if (ept_qual & EPT_VIOLATION_INST_FETCH)
1931 		return (false);
1932 
1933 	/* EPT fault must be a read fault or a write fault */
1934 	read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1935 	write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1936 	if ((read | write) == 0)
1937 		return (false);
1938 
1939 	/*
1940 	 * The EPT violation must have been caused by accessing a
1941 	 * guest-physical address that is a translation of a guest-linear
1942 	 * address.
1943 	 */
1944 	if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1945 	    (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1946 		return (false);
1947 	}
1948 
1949 	return (true);
1950 }
1951 
1952 static __inline int
1953 apic_access_virtualization(struct vmx *vmx, int vcpuid)
1954 {
1955 	uint32_t proc_ctls2;
1956 
1957 	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1958 	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1959 }
1960 
1961 static __inline int
1962 x2apic_virtualization(struct vmx *vmx, int vcpuid)
1963 {
1964 	uint32_t proc_ctls2;
1965 
1966 	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1967 	return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1968 }
1969 
1970 static int
1971 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1972     uint64_t qual)
1973 {
1974 	const uint_t offset = APIC_WRITE_OFFSET(qual);
1975 
1976 	if (!apic_access_virtualization(vmx, vcpuid)) {
1977 		/*
1978 		 * In general there should not be any APIC write VM-exits
1979 		 * unless APIC-access virtualization is enabled.
1980 		 *
1981 		 * However, self-IPI virtualization can legitimately trigger
1982 		 * an APIC-write VM-exit, so treat it specially.
1983 		 */
1984 		if (x2apic_virtualization(vmx, vcpuid) &&
1985 		    offset == APIC_OFFSET_SELF_IPI) {
1986 			const uint32_t *apic_regs =
1987 			    (uint32_t *)(vlapic->apic_page);
1988 			const uint32_t vector =
1989 			    apic_regs[APIC_OFFSET_SELF_IPI / 4];
1990 
1991 			vlapic_self_ipi_handler(vlapic, vector);
1992 			return (HANDLED);
1993 		} else
1994 			return (UNHANDLED);
1995 	}
1996 
1997 	switch (offset) {
1998 	case APIC_OFFSET_ID:
1999 		vlapic_id_write_handler(vlapic);
2000 		break;
2001 	case APIC_OFFSET_LDR:
2002 		vlapic_ldr_write_handler(vlapic);
2003 		break;
2004 	case APIC_OFFSET_DFR:
2005 		vlapic_dfr_write_handler(vlapic);
2006 		break;
2007 	case APIC_OFFSET_SVR:
2008 		vlapic_svr_write_handler(vlapic);
2009 		break;
2010 	case APIC_OFFSET_ESR:
2011 		vlapic_esr_write_handler(vlapic);
2012 		break;
2013 	case APIC_OFFSET_ICR_LOW:
2014 		vlapic_icrlo_write_handler(vlapic);
2015 		break;
2016 	case APIC_OFFSET_CMCI_LVT:
2017 	case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
2018 		vlapic_lvt_write_handler(vlapic, offset);
2019 		break;
2020 	case APIC_OFFSET_TIMER_ICR:
2021 		vlapic_icrtmr_write_handler(vlapic);
2022 		break;
2023 	case APIC_OFFSET_TIMER_DCR:
2024 		vlapic_dcr_write_handler(vlapic);
2025 		break;
2026 	default:
2027 		return (UNHANDLED);
2028 	}
2029 	return (HANDLED);
2030 }
2031 
2032 static bool
2033 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
2034 {
2035 
2036 	if (apic_access_virtualization(vmx, vcpuid) &&
2037 	    (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
2038 		return (true);
2039 	else
2040 		return (false);
2041 }
2042 
2043 static int
2044 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2045 {
2046 	uint64_t qual;
2047 	int access_type, offset, allowed;
2048 	struct vie *vie;
2049 
2050 	if (!apic_access_virtualization(vmx, vcpuid))
2051 		return (UNHANDLED);
2052 
2053 	qual = vmexit->u.vmx.exit_qualification;
2054 	access_type = APIC_ACCESS_TYPE(qual);
2055 	offset = APIC_ACCESS_OFFSET(qual);
2056 
2057 	allowed = 0;
2058 	if (access_type == 0) {
2059 		/*
2060 		 * Read data access to the following registers is expected.
2061 		 */
2062 		switch (offset) {
2063 		case APIC_OFFSET_APR:
2064 		case APIC_OFFSET_PPR:
2065 		case APIC_OFFSET_RRR:
2066 		case APIC_OFFSET_CMCI_LVT:
2067 		case APIC_OFFSET_TIMER_CCR:
2068 			allowed = 1;
2069 			break;
2070 		default:
2071 			break;
2072 		}
2073 	} else if (access_type == 1) {
2074 		/*
2075 		 * Write data access to the following registers is expected.
2076 		 */
2077 		switch (offset) {
2078 		case APIC_OFFSET_VER:
2079 		case APIC_OFFSET_APR:
2080 		case APIC_OFFSET_PPR:
2081 		case APIC_OFFSET_RRR:
2082 		case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
2083 		case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
2084 		case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
2085 		case APIC_OFFSET_CMCI_LVT:
2086 		case APIC_OFFSET_TIMER_CCR:
2087 			allowed = 1;
2088 			break;
2089 		default:
2090 			break;
2091 		}
2092 	}
2093 
2094 	if (allowed) {
2095 		vie = vm_vie_ctx(vmx->vm, vcpuid);
2096 		vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset,
2097 		    VIE_INVALID_GLA);
2098 	}
2099 
2100 	/*
2101 	 * Regardless of whether the APIC-access is allowed, this handler
2102 	 * always returns UNHANDLED:
2103 	 * - if the access is allowed then it is handled by emulating the
2104 	 *   instruction that caused the VM-exit (outside the critical section)
2105 	 * - if the access is not allowed then it will be converted to an
2106 	 *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2107 	 */
2108 	return (UNHANDLED);
2109 }
2110 
2111 static enum task_switch_reason
2112 vmx_task_switch_reason(uint64_t qual)
2113 {
2114 	int reason;
2115 
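	/* Bits 31:30 of the exit qualification encode the task-switch source. */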
2116 	reason = (qual >> 30) & 0x3;
2117 	switch (reason) {
2118 	case 0:
2119 		return (TSR_CALL);
2120 	case 1:
2121 		return (TSR_IRET);
2122 	case 2:
2123 		return (TSR_JMP);
2124 	case 3:
2125 		return (TSR_IDT_GATE);
2126 	default:
2127 		panic("%s: invalid reason %d", __func__, reason);
2128 	}
2129 }
2130 
2131 static int
2132 vmx_handle_msr(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit,
2133     bool is_wrmsr)
2134 {
2135 	struct vmxctx *vmxctx = &vmx->ctx[vcpuid];
2136 	const uint32_t ecx = vmxctx->guest_rcx;
2137 	vm_msr_result_t res;
2138 	uint64_t val = 0;
2139 
2140 	if (is_wrmsr) {
2141 		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1);
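		/* The WRMSR value arrives in %edx:%eax; upper halves are ignored. */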
2142 		val = vmxctx->guest_rdx << 32 | (uint32_t)vmxctx->guest_rax;
2143 
2144 		if (vlapic_owned_msr(ecx)) {
2145 			struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid);
2146 
2147 			res = vlapic_wrmsr(vlapic, ecx, val);
2148 		} else {
2149 			res = vmx_wrmsr(vmx, vcpuid, ecx, val);
2150 		}
2151 	} else {
2152 		vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1);
2153 
2154 		if (vlapic_owned_msr(ecx)) {
2155 			struct vlapic *vlapic = vm_lapic(vmx->vm, vcpuid);
2156 
2157 			res = vlapic_rdmsr(vlapic, ecx, &val);
2158 		} else {
2159 			res = vmx_rdmsr(vmx, vcpuid, ecx, &val);
2160 		}
2161 	}
2162 
2163 	switch (res) {
2164 	case VMR_OK:
2165 		/* Store rdmsr result in the appropriate registers */
2166 		if (!is_wrmsr) {
2167 			vmxctx->guest_rax = (uint32_t)val;
2168 			vmxctx->guest_rdx = val >> 32;
2169 		}
2170 		return (HANDLED);
2171 	case VMR_GP:
2172 		vm_inject_gp(vmx->vm, vcpuid);
2173 		return (HANDLED);
2174 	case VMR_UNHANLDED:
2175 		vmexit->exitcode = is_wrmsr ?
2176 		    VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR;
2177 		vmexit->u.msr.code = ecx;
2178 		vmexit->u.msr.wval = val;
2179 		return (UNHANDLED);
2180 	default:
2181 		panic("unexpected msr result %u\n", res);
2182 	}
2183 }
2184 
2185 static int
2186 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2187 {
2188 	int error, errcode, errcode_valid, handled;
2189 	struct vmxctx *vmxctx;
2190 	struct vie *vie;
2191 	struct vlapic *vlapic;
2192 	struct vm_task_switch *ts;
2193 	uint32_t idtvec_info, intr_info;
2194 	uint32_t intr_type, intr_vec, reason;
2195 	uint64_t qual, gpa;
2196 
2197 	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2198 	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2199 
2200 	handled = UNHANDLED;
2201 	vmxctx = &vmx->ctx[vcpu];
2202 
2203 	qual = vmexit->u.vmx.exit_qualification;
2204 	reason = vmexit->u.vmx.exit_reason;
2205 	vmexit->exitcode = VM_EXITCODE_BOGUS;
2206 
2207 	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2208 	SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit);
2209 
2210 	/*
2211 	 * VM-entry failures during or after loading guest state.
2212 	 *
2213 	 * These VM-exits are uncommon but must be handled specially
2214 	 * as most VM-exit fields are not populated as usual.
2215 	 */
2216 	if (reason == EXIT_REASON_MCE_DURING_ENTRY) {
2217 		vmm_call_trap(T_MCE);
2218 		return (1);
2219 	}
2220 
2221 	/*
2222 	 * VM exits that can be triggered during event delivery need to
2223 	 * be handled specially by re-injecting the event if the IDT
2224 	 * vectoring information field's valid bit is set.
2225 	 *
2226 	 * See "Information for VM Exits During Event Delivery" in Intel SDM
2227 	 * for details.
2228 	 */
2229 	idtvec_info = vmcs_read(VMCS_IDT_VECTORING_INFO);
2230 	if (idtvec_info & VMCS_IDT_VEC_VALID) {
2231 		uint32_t errcode = 0;
2232 		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2233 			errcode = vmcs_read(VMCS_IDT_VECTORING_ERROR);
2234 		}
2235 
2236 		/* Record exit intinfo */
2237 		VERIFY0(vm_exit_intinfo(vmx->vm, vcpu,
2238 		    vmx_idtvec_to_intinfo(idtvec_info, errcode)));
2239 
2240 		/*
2241 		 * If 'virtual NMIs' are being used and the VM-exit
2242 		 * happened while injecting an NMI during the previous
2243 		 * VM-entry, then clear "blocking by NMI" in the
2244 		 * Guest Interruptibility-State so the NMI can be
2245 		 * reinjected on the subsequent VM-entry.
2246 		 *
2247 		 * However, if the NMI was being delivered through a task
2248 		 * gate, then the new task must start execution with NMIs
2249 		 * blocked, so don't clear NMI blocking in this case.
2250 		 */
2251 		intr_type = idtvec_info & VMCS_INTR_T_MASK;
2252 		if (intr_type == VMCS_INTR_T_NMI) {
2253 			if (reason != EXIT_REASON_TASK_SWITCH)
2254 				vmx_clear_nmi_blocking(vmx, vcpu);
2255 			else
2256 				vmx_assert_nmi_blocking(vmx, vcpu);
2257 		}
2258 
2259 		/*
2260 		 * Update VM-entry instruction length if the event being
2261 		 * delivered was a software interrupt or software exception.
2262 		 */
2263 		if (intr_type == VMCS_INTR_T_SWINTR ||
2264 		    intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2265 		    intr_type == VMCS_INTR_T_SWEXCEPTION) {
2266 			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2267 		}
2268 	}
2269 
2270 	switch (reason) {
2271 	case EXIT_REASON_TRIPLE_FAULT:
2272 		(void) vm_suspend(vmx->vm, VM_SUSPEND_TRIPLEFAULT);
2273 		handled = HANDLED;
2274 		break;
2275 	case EXIT_REASON_TASK_SWITCH:
2276 		ts = &vmexit->u.task_switch;
2277 		ts->tsssel = qual & 0xffff;
2278 		ts->reason = vmx_task_switch_reason(qual);
2279 		ts->ext = 0;
2280 		ts->errcode_valid = 0;
2281 		vmx_paging_info(&ts->paging);
2282 		/*
2283 		 * If the task switch was due to a CALL, JMP, IRET, software
2284 		 * interrupt (INT n) or software exception (INT3, INTO),
2285 		 * then the saved %rip references the instruction that caused
2286 		 * the task switch. The instruction length field in the VMCS
2287 		 * is valid in this case.
2288 		 *
2289 		 * In all other cases (e.g., NMI, hardware exception) the
2290 		 * saved %rip is one that would have been saved in the old TSS
2291 		 * had the task switch completed normally, so the instruction
2292 		 * length field is not needed in this case and is explicitly
2293 		 * set to 0.
2294 		 */
2295 		if (ts->reason == TSR_IDT_GATE) {
2296 			KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2297 			    ("invalid idtvec_info %x for IDT task switch",
2298 			    idtvec_info));
2299 			intr_type = idtvec_info & VMCS_INTR_T_MASK;
2300 			if (intr_type != VMCS_INTR_T_SWINTR &&
2301 			    intr_type != VMCS_INTR_T_SWEXCEPTION &&
2302 			    intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2303 				/* Task switch triggered by external event */
2304 				ts->ext = 1;
2305 				vmexit->inst_length = 0;
2306 				if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2307 					ts->errcode_valid = 1;
2308 					ts->errcode =
2309 					    vmcs_read(VMCS_IDT_VECTORING_ERROR);
2310 				}
2311 			}
2312 		}
2313 		vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2314 		SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts);
2315 		break;
2316 	case EXIT_REASON_CR_ACCESS:
2317 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2318 		SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual);
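		/* Bits 3:0 of the exit qualification identify the control register. */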
2319 		switch (qual & 0xf) {
2320 		case 0:
2321 			handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2322 			break;
2323 		case 4:
2324 			handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2325 			break;
2326 		case 8:
2327 			handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2328 			break;
2329 		}
2330 		break;
2331 	case EXIT_REASON_RDMSR:
2332 	case EXIT_REASON_WRMSR:
2333 		handled = vmx_handle_msr(vmx, vcpu, vmexit,
2334 		    reason == EXIT_REASON_WRMSR);
2335 		break;
2336 	case EXIT_REASON_HLT:
2337 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2338 		SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit);
2339 		vmexit->exitcode = VM_EXITCODE_HLT;
2340 		vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2341 		break;
2342 	case EXIT_REASON_MTF:
2343 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2344 		SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit);
2345 		vmexit->exitcode = VM_EXITCODE_MTRAP;
2346 		vmexit->inst_length = 0;
2347 		break;
2348 	case EXIT_REASON_PAUSE:
2349 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2350 		SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit);
2351 		vmexit->exitcode = VM_EXITCODE_PAUSE;
2352 		break;
2353 	case EXIT_REASON_INTR_WINDOW:
2354 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2355 		SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
2356 		ASSERT(vmx_int_window_exiting(vmx, vcpu));
2357 		vmx_clear_int_window_exiting(vmx, vcpu);
2358 		return (1);
2359 	case EXIT_REASON_EXT_INTR:
2360 		/*
2361 		 * External interrupts serve only to cause VM exits and allow
2362 		 * the host interrupt handler to run.
2363 		 *
2364 		 * If this external interrupt triggers a virtual interrupt
2365 		 * to a VM, then that state will be recorded by the
2366 		 * host interrupt handler in the VM's softc. We will inject
2367 		 * this virtual interrupt during the subsequent VM enter.
2368 		 */
2369 		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2370 		SDT_PROBE4(vmm, vmx, exit, interrupt,
2371 		    vmx, vcpu, vmexit, intr_info);
2372 
2373 		/*
2374 		 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2375 		 * This appears to be a bug in VMware Fusion?
2376 		 */
2377 		if (!(intr_info & VMCS_INTR_VALID))
2378 			return (1);
2379 		KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2380 		    (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2381 		    ("VM exit interruption info invalid: %x", intr_info));
2382 		vmx_trigger_hostintr(intr_info & 0xff);
2383 
2384 		/*
2385 		 * This is special. We want to treat this as a 'handled'
2386 		 * VM-exit but not increment the instruction pointer.
2387 		 */
2388 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2389 		return (1);
2390 	case EXIT_REASON_NMI_WINDOW:
2391 		SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit);
2392 		/* Exit to allow the pending virtual NMI to be injected */
2393 		if (vm_nmi_pending(vmx->vm, vcpu))
2394 			vmx_inject_nmi(vmx, vcpu);
2395 		ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
2396 		vmx_clear_nmi_window_exiting(vmx, vcpu);
2397 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2398 		return (1);
2399 	case EXIT_REASON_INOUT:
2400 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2401 		vie = vm_vie_ctx(vmx->vm, vcpu);
2402 		vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax);
2403 		SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit);
2404 		break;
2405 	case EXIT_REASON_CPUID:
2406 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2407 		SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit);
2408 		vcpu_emulate_cpuid(vmx->vm, vcpu,
2409 		    (uint64_t *)&vmxctx->guest_rax,
2410 		    (uint64_t *)&vmxctx->guest_rbx,
2411 		    (uint64_t *)&vmxctx->guest_rcx,
2412 		    (uint64_t *)&vmxctx->guest_rdx);
2413 		handled = HANDLED;
2414 		break;
2415 	case EXIT_REASON_EXCEPTION:
2416 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2417 		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2418 		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2419 		    ("VM exit interruption info invalid: %x", intr_info));
2420 
2421 		intr_vec = intr_info & 0xff;
2422 		intr_type = intr_info & VMCS_INTR_T_MASK;
2423 
2424 		/*
2425 		 * If Virtual NMIs control is 1 and the VM-exit is due to a
2426 		 * fault encountered during the execution of IRET then we must
2427 		 * restore the state of "virtual-NMI blocking" before resuming
2428 		 * the guest.
2429 		 *
2430 		 * See "Resuming Guest Software after Handling an Exception".
2431 		 * See "Information for VM Exits Due to Vectored Events".
2432 		 */
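		/*
		 * Bit 12, 'NMI unblocking due to IRET', occupies the same
		 * position in the interruption information field as it does in
		 * the EPT exit qualification, hence the reuse of
		 * EXIT_QUAL_NMIUDTI here.
		 */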
2433 		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2434 		    (intr_vec != IDT_DF) &&
2435 		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2436 			vmx_restore_nmi_blocking(vmx, vcpu);
2437 
2438 		/*
2439 		 * The NMI has already been handled in vmx_exit_handle_nmi().
2440 		 */
2441 		if (intr_type == VMCS_INTR_T_NMI)
2442 			return (1);
2443 
2444 		/*
2445 		 * Call the machine check handler by hand. Also don't reflect
2446 		 * the machine check back into the guest.
2447 		 */
2448 		if (intr_vec == IDT_MC) {
2449 			vmm_call_trap(T_MCE);
2450 			return (1);
2451 		}
2452 
2453 		/*
2454 		 * If the hypervisor has requested user exits for
2455 		 * debug exceptions, bounce them out to userland.
2456 		 */
2457 		if (intr_type == VMCS_INTR_T_SWEXCEPTION &&
2458 		    intr_vec == IDT_BP &&
2459 		    (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) {
2460 			vmexit->exitcode = VM_EXITCODE_BPT;
2461 			vmexit->u.bpt.inst_length = vmexit->inst_length;
2462 			vmexit->inst_length = 0;
2463 			break;
2464 		}
2465 
2466 		if (intr_vec == IDT_PF) {
2467 			vmxctx->guest_cr2 = qual;
2468 		}
2469 
2470 		/*
2471 		 * Software exceptions exhibit trap-like behavior. This in
2472 		 * turn requires populating the VM-entry instruction length
2473 		 * so that the %rip in the trap frame is past the INT3/INTO
2474 		 * instruction.
2475 		 */
2476 		if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2477 			vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2478 
2479 		/* Reflect all other exceptions back into the guest */
2480 		errcode_valid = errcode = 0;
2481 		if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2482 			errcode_valid = 1;
2483 			errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2484 		}
2485 		SDT_PROBE5(vmm, vmx, exit, exception,
2486 		    vmx, vcpu, vmexit, intr_vec, errcode);
2487 		error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2488 		    errcode_valid, errcode, 0);
2489 		KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2490 		    __func__, error));
2491 		return (1);
2492 
2493 	case EXIT_REASON_EPT_FAULT:
2494 		/*
2495 		 * If 'gpa' lies within the address space allocated to
2496 		 * memory then this must be a nested page fault; otherwise
2497 		 * it must be an instruction that accesses MMIO space.
2498 		 */
2499 		gpa = vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS);
2500 		if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
2501 		    apic_access_fault(vmx, vcpu, gpa)) {
2502 			vmexit->exitcode = VM_EXITCODE_PAGING;
2503 			vmexit->inst_length = 0;
2504 			vmexit->u.paging.gpa = gpa;
2505 			vmexit->u.paging.fault_type = ept_fault_type(qual);
2506 			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2507 			SDT_PROBE5(vmm, vmx, exit, nestedfault,
2508 			    vmx, vcpu, vmexit, gpa, qual);
2509 		} else if (ept_emulation_fault(qual)) {
2510 			vie = vm_vie_ctx(vmx->vm, vcpu);
2511 			vmexit_mmio_emul(vmexit, vie, gpa,
2512 			    vmcs_read(VMCS_GUEST_LINEAR_ADDRESS));
2513 			vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
2514 			SDT_PROBE4(vmm, vmx, exit, mmiofault,
2515 			    vmx, vcpu, vmexit, gpa);
2516 		}
2517 		/*
2518 		 * If Virtual NMIs control is 1 and the VM-exit is due to an
2519 		 * EPT fault during the execution of IRET then we must restore
2520 		 * the state of "virtual-NMI blocking" before resuming.
2521 		 *
2522 		 * See description of "NMI unblocking due to IRET" in
2523 		 * "Exit Qualification for EPT Violations".
2524 		 */
2525 		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2526 		    (qual & EXIT_QUAL_NMIUDTI) != 0)
2527 			vmx_restore_nmi_blocking(vmx, vcpu);
2528 		break;
2529 	case EXIT_REASON_VIRTUALIZED_EOI:
2530 		vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2531 		vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2532 		SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit);
2533 		vmexit->inst_length = 0;	/* trap-like */
2534 		break;
2535 	case EXIT_REASON_APIC_ACCESS:
2536 		SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit);
2537 		handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2538 		break;
2539 	case EXIT_REASON_APIC_WRITE:
2540 		/*
2541 		 * APIC-write VM exit is trap-like so the %rip is already
2542 		 * pointing to the next instruction.
2543 		 */
2544 		vmexit->inst_length = 0;
2545 		vlapic = vm_lapic(vmx->vm, vcpu);
2546 		SDT_PROBE4(vmm, vmx, exit, apicwrite,
2547 		    vmx, vcpu, vmexit, vlapic);
2548 		handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2549 		break;
2550 	case EXIT_REASON_XSETBV:
2551 		SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit);
2552 		handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2553 		break;
2554 	case EXIT_REASON_MONITOR:
2555 		SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit);
2556 		vmexit->exitcode = VM_EXITCODE_MONITOR;
2557 		break;
2558 	case EXIT_REASON_MWAIT:
2559 		SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit);
2560 		vmexit->exitcode = VM_EXITCODE_MWAIT;
2561 		break;
2562 	case EXIT_REASON_TPR:
2563 		vlapic = vm_lapic(vmx->vm, vcpu);
2564 		vlapic_sync_tpr(vlapic);
2565 		vmexit->inst_length = 0;
2566 		handled = HANDLED;
2567 		break;
2568 	case EXIT_REASON_VMCALL:
2569 	case EXIT_REASON_VMCLEAR:
2570 	case EXIT_REASON_VMLAUNCH:
2571 	case EXIT_REASON_VMPTRLD:
2572 	case EXIT_REASON_VMPTRST:
2573 	case EXIT_REASON_VMREAD:
2574 	case EXIT_REASON_VMRESUME:
2575 	case EXIT_REASON_VMWRITE:
2576 	case EXIT_REASON_VMXOFF:
2577 	case EXIT_REASON_VMXON:
2578 		SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit);
2579 		vmexit->exitcode = VM_EXITCODE_VMINSN;
2580 		break;
2581 	case EXIT_REASON_INVD:
2582 	case EXIT_REASON_WBINVD:
2583 		/* ignore exit */
2584 		handled = HANDLED;
2585 		break;
2586 	default:
2587 		SDT_PROBE4(vmm, vmx, exit, unknown,
2588 		    vmx, vcpu, vmexit, reason);
2589 		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2590 		break;
2591 	}
2592 
2593 	if (handled) {
2594 		/*
2595 		 * It is possible that control is returned to userland
2596 		 * even though we were able to handle the VM exit in the
2597 		 * kernel.
2598 		 *
2599 		 * In such a case we want to make sure that userland
2600 		 * restarts guest execution at the instruction *after*
2601 		 * the one we just processed. Therefore we update the
2602 		 * guest rip in the VMCS and in 'vmexit'.
2603 		 */
2604 		vmexit->rip += vmexit->inst_length;
2605 		vmexit->inst_length = 0;
2606 		vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2607 	} else {
2608 		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2609 			/*
2610 			 * If this VM exit was not claimed by anybody then
2611 			 * treat it as a generic VMX exit.
2612 			 */
2613 			vmexit->exitcode = VM_EXITCODE_VMX;
2614 			vmexit->u.vmx.status = VM_SUCCESS;
2615 			vmexit->u.vmx.inst_type = 0;
2616 			vmexit->u.vmx.inst_error = 0;
2617 		} else {
2618 			/*
2619 			 * The exitcode and collateral have been populated.
2620 			 * The VM exit will be processed further in userland.
2621 			 */
2622 		}
2623 	}
2624 
2625 	SDT_PROBE4(vmm, vmx, exit, return,
2626 	    vmx, vcpu, vmexit, handled);
2627 	return (handled);
2628 }
2629 
2630 static void
2631 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2632 {
2633 
2634 	KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2635 	    ("vmx_exit_inst_error: invalid inst_fail_status %d",
2636 	    vmxctx->inst_fail_status));
2637 
2638 	vmexit->inst_length = 0;
2639 	vmexit->exitcode = VM_EXITCODE_VMX;
2640 	vmexit->u.vmx.status = vmxctx->inst_fail_status;
2641 	vmexit->u.vmx.inst_error = vmcs_read(VMCS_INSTRUCTION_ERROR);
2642 	vmexit->u.vmx.exit_reason = ~0;
2643 	vmexit->u.vmx.exit_qualification = ~0;
2644 
2645 	switch (rc) {
2646 	case VMX_VMRESUME_ERROR:
2647 	case VMX_VMLAUNCH_ERROR:
2648 	case VMX_INVEPT_ERROR:
2649 	case VMX_VMWRITE_ERROR:
2650 		vmexit->u.vmx.inst_type = rc;
2651 		break;
2652 	default:
2653 		panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2654 	}
2655 }
2656 
2657 /*
2658  * If the NMI-exiting VM execution control is set to '1' then an NMI in
2659  * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2660  * sufficient to simply vector to the NMI handler via a software interrupt.
2661  * However, this must be done before maskable interrupts are enabled;
2662  * otherwise the "iret" issued by an interrupt handler will incorrectly
2663  * clear NMI blocking.
2664  */
2665 static __inline void
2666 vmx_exit_handle_possible_nmi(struct vm_exit *vmexit)
2667 {
2668 	ASSERT(!interrupts_enabled());
2669 
2670 	if (vmexit->u.vmx.exit_reason == EXIT_REASON_EXCEPTION) {
2671 		uint32_t intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2672 		ASSERT(intr_info & VMCS_INTR_VALID);
2673 
2674 		if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2675 			ASSERT3U(intr_info & 0xff, ==, IDT_NMI);
2676 			vmm_call_trap(T_NMIFLT);
2677 		}
2678 	}
2679 }
2680 
2681 static __inline void
2682 vmx_dr_enter_guest(struct vmxctx *vmxctx)
2683 {
2684 	uint64_t rflags;
2685 
2686 	/* Save host control debug registers. */
2687 	vmxctx->host_dr7 = rdr7();
2688 	vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
2689 
2690 	/*
2691 	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
2692 	 * exceptions in the host based on the guest DRx values.  The
2693 	 * guest DR7 and DEBUGCTL are saved/restored in the VMCS.
2694 	 */
2695 	load_dr7(0);
2696 	wrmsr(MSR_DEBUGCTLMSR, 0);
2697 
2698 	/*
2699 	 * Disable single stepping the kernel to avoid corrupting the
2700 	 * guest DR6.  A debugger might still be able to corrupt the
2701 	 * guest DR6 by setting a breakpoint after this point and then
2702 	 * single stepping.
2703 	 */
2704 	rflags = read_rflags();
2705 	vmxctx->host_tf = rflags & PSL_T;
2706 	write_rflags(rflags & ~PSL_T);
2707 
2708 	/* Save host debug registers. */
2709 	vmxctx->host_dr0 = rdr0();
2710 	vmxctx->host_dr1 = rdr1();
2711 	vmxctx->host_dr2 = rdr2();
2712 	vmxctx->host_dr3 = rdr3();
2713 	vmxctx->host_dr6 = rdr6();
2714 
2715 	/* Restore guest debug registers. */
2716 	load_dr0(vmxctx->guest_dr0);
2717 	load_dr1(vmxctx->guest_dr1);
2718 	load_dr2(vmxctx->guest_dr2);
2719 	load_dr3(vmxctx->guest_dr3);
2720 	load_dr6(vmxctx->guest_dr6);
2721 }
2722 
2723 static __inline void
2724 vmx_dr_leave_guest(struct vmxctx *vmxctx)
2725 {
2726 
2727 	/* Save guest debug registers. */
2728 	vmxctx->guest_dr0 = rdr0();
2729 	vmxctx->guest_dr1 = rdr1();
2730 	vmxctx->guest_dr2 = rdr2();
2731 	vmxctx->guest_dr3 = rdr3();
2732 	vmxctx->guest_dr6 = rdr6();
2733 
2734 	/*
2735 	 * Restore host debug registers.  Restore DR7, DEBUGCTL, and
2736 	 * PSL_T last.
2737 	 */
2738 	load_dr0(vmxctx->host_dr0);
2739 	load_dr1(vmxctx->host_dr1);
2740 	load_dr2(vmxctx->host_dr2);
2741 	load_dr3(vmxctx->host_dr3);
2742 	load_dr6(vmxctx->host_dr6);
2743 	wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
2744 	load_dr7(vmxctx->host_dr7);
2745 	write_rflags(read_rflags() | vmxctx->host_tf);
2746 }
2747 
2748 static int
2749 vmx_run(void *arg, int vcpu, uint64_t rip)
2750 {
2751 	int rc, handled, launched;
2752 	struct vmx *vmx;
2753 	struct vm *vm;
2754 	struct vmxctx *vmxctx;
2755 	uintptr_t vmcs_pa;
2756 	struct vm_exit *vmexit;
2757 	struct vlapic *vlapic;
2758 	uint32_t exit_reason;
2759 	bool tpr_shadow_active;
2760 	vm_client_t *vmc;
2761 
2762 	vmx = arg;
2763 	vm = vmx->vm;
2764 	vmcs_pa = vmx->vmcs_pa[vcpu];
2765 	vmxctx = &vmx->ctx[vcpu];
2766 	vlapic = vm_lapic(vm, vcpu);
2767 	vmexit = vm_exitinfo(vm, vcpu);
2768 	vmc = vm_get_vmclient(vm, vcpu);
2769 	launched = 0;
2770 	tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) &&
2771 	    !vmx_cap_en(vmx, VMX_CAP_APICV) &&
2772 	    (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0;
2773 
2774 	vmx_msr_guest_enter(vmx, vcpu);
2775 
2776 	vmcs_load(vmcs_pa);
2777 
2778 	VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0);
2779 	vmx->vmcs_state[vcpu] = VS_LOADED;
2780 
2781 	/*
2782 	 * XXX
2783 	 * We do this every time because we may set up the virtual machine
2784 	 * from a different process than the one that actually runs it.
2785 	 *
2786 	 * If the life of a virtual machine was spent entirely in the context
2787 	 * of a single process we could do this once in vmx_vminit().
2788 	 */
2789 	vmcs_write(VMCS_HOST_CR3, rcr3());
2790 
2791 	vmcs_write(VMCS_GUEST_RIP, rip);
2792 	vmx_set_pcpu_defaults(vmx, vcpu);
2793 	do {
2794 		enum event_inject_state inject_state;
2795 		uint64_t eptgen;
2796 
2797 		ASSERT3U(vmcs_read(VMCS_GUEST_RIP), ==, rip);
2798 
2799 		handled = UNHANDLED;
2800 
2801 		/*
2802 		 * Perform initial event/exception/interrupt injection before
2803 		 * host CPU interrupts are disabled.
2804 		 */
2805 		inject_state = vmx_inject_events(vmx, vcpu, rip);
2806 
2807 		/*
2808 		 * Interrupts are disabled from this point on until the
2809 		 * guest starts executing. This is done for the following
2810 		 * reasons:
2811 		 *
2812 		 * If an AST is asserted on this thread after the check below,
2813 		 * then the IPI_AST notification will not be lost, because it
2814 		 * will cause a VM exit due to external interrupt as soon as
2815 		 * the guest state is loaded.
2816 		 *
2817 		 * A posted interrupt after vmx_inject_vlapic() will not be
2818 		 * "lost" because it will be held pending in the host APIC
2819 		 * while interrupts are disabled. The pending interrupt will
2820 		 * be recognized as soon as the guest state is loaded.
2821 		 *
2822 		 * The same reasoning applies to the IPI generated by vmspace
2823 		 * invalidation.
2824 		 */
2825 		disable_intr();
2826 
2827 		/*
2828 		 * If not precluded by existing events, inject any interrupt
2829 		 * pending on the vLAPIC.  As a lock-less operation, it is safe
2830 		 * (and prudent) to perform with host CPU interrupts disabled.
2831 		 */
2832 		if (inject_state == EIS_CAN_INJECT) {
2833 			inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic);
2834 		}
2835 
2836 		/*
2837 		 * Check for vCPU bail-out conditions.  This must be done after
2838 		 * vmx_inject_events() to detect a triple-fault condition.
2839 		 */
2840 		if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) {
2841 			enable_intr();
2842 			break;
2843 		}
2844 
2845 		if (vcpu_run_state_pending(vm, vcpu)) {
2846 			enable_intr();
2847 			vm_exit_run_state(vmx->vm, vcpu, rip);
2848 			break;
2849 		}
2850 
2851 		/*
2852 		 * If subsequent activity queued events which require injection
2853 		 * handling, take another lap to handle them.
2854 		 */
2855 		if (vmx_inject_recheck(vmx, vcpu, inject_state)) {
2856 			enable_intr();
2857 			handled = HANDLED;
2858 			continue;
2859 		}
2860 
2861 		if ((rc = smt_acquire()) != 1) {
2862 			enable_intr();
2863 			vmexit->rip = rip;
2864 			vmexit->inst_length = 0;
2865 			if (rc == -1) {
2866 				vmexit->exitcode = VM_EXITCODE_HT;
2867 			} else {
2868 				vmexit->exitcode = VM_EXITCODE_BOGUS;
2869 				handled = HANDLED;
2870 			}
2871 			break;
2872 		}
2873 
2874 		/*
2875 		 * If this thread has gone off-cpu due to mutex operations
2876 		 * during vmx_run, the VMCS will have been unloaded, forcing a
2877 		 * re-VMLAUNCH as opposed to VMRESUME.
2878 		 */
2879 		launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0;
2880 		/*
2881 		 * Restoration of the GDT limit is taken care of by
2882 		 * vmx_savectx().  Since the maximum practical index for the
2883 		 * IDT is 255, restoring its limits from the post-VMX-exit
2884 		 * default of 0xffff is not a concern.
2885 		 *
2886 		 * Only 64-bit hypervisor callers are allowed, which obviates
2887 		 * the need to restore any LDT descriptor.  Toss an error to
2888 		 * anyone attempting to break that rule.
2889 		 */
2890 		if (curproc->p_model != DATAMODEL_LP64) {
2891 			smt_release();
2892 			enable_intr();
2893 			bzero(vmexit, sizeof (*vmexit));
2894 			vmexit->rip = rip;
2895 			vmexit->exitcode = VM_EXITCODE_VMX;
2896 			vmexit->u.vmx.status = VM_FAIL_INVALID;
2897 			handled = UNHANDLED;
2898 			break;
2899 		}
2900 
2901 		if (tpr_shadow_active) {
2902 			vmx_tpr_shadow_enter(vlapic);
2903 		}
2904 
2905 		/*
2906 		 * Indicate activation of vmspace (EPT) table just prior to VMX
2907 		 * entry, checking for the necessity of an invept invalidation.
2908 		 */
2909 		eptgen = vmc_table_enter(vmc);
2910 		if (vmx->eptgen[curcpu] != eptgen) {
2911 			/*
2912 			 * VMspace generation does not match what was previously
2913 			 * used on this host CPU, so all mappings associated
2914 			 * with this EP4TA must be invalidated.
2915 			 */
2916 			invept(1, vmx->eptp);
2917 			vmx->eptgen[curcpu] = eptgen;
2918 		}
2919 
2920 		vcpu_ustate_change(vm, vcpu, VU_RUN);
2921 		vmx_dr_enter_guest(vmxctx);
2922 
2923 		/* Perform VMX entry */
2924 		rc = vmx_enter_guest(vmxctx, vmx, launched);
2925 
2926 		vmx_dr_leave_guest(vmxctx);
2927 		vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);
2928 
2929 		vmx->vmcs_state[vcpu] |= VS_LAUNCHED;
2930 		smt_release();
2931 
2932 		if (tpr_shadow_active) {
2933 			vmx_tpr_shadow_exit(vlapic);
2934 		}
2935 
2936 		/* Collect some information for VM exit processing */
2937 		vmexit->rip = rip = vmcs_read(VMCS_GUEST_RIP);
2938 		vmexit->inst_length = vmcs_read(VMCS_EXIT_INSTRUCTION_LENGTH);
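		/* Only the basic exit reason is kept; upper flag bits are masked off. */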
2939 		vmexit->u.vmx.exit_reason = exit_reason =
2940 		    (vmcs_read(VMCS_EXIT_REASON) & BASIC_EXIT_REASON_MASK);
2941 		vmexit->u.vmx.exit_qualification =
2942 		    vmcs_read(VMCS_EXIT_QUALIFICATION);
2943 		/* Update 'nextrip' */
2944 		vmx->state[vcpu].nextrip = rip;
2945 
2946 		if (rc == VMX_GUEST_VMEXIT) {
2947 			vmx_exit_handle_possible_nmi(vmexit);
2948 		}
2949 		enable_intr();
2950 		vmc_table_exit(vmc);
2951 
2952 		if (rc == VMX_GUEST_VMEXIT) {
2953 			handled = vmx_exit_process(vmx, vcpu, vmexit);
2954 		} else {
2955 			vmx_exit_inst_error(vmxctx, rc, vmexit);
2956 		}
2957 		DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip,
2958 		    uint32_t, exit_reason);
2959 		rip = vmexit->rip;
2960 	} while (handled);
2961 
2962 	/* If a VM exit has been handled then the exitcode must be BOGUS */
2963 	if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) {
2964 		panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit",
2965 		    vmexit->exitcode);
2966 	}
2967 
2968 	vmcs_clear(vmcs_pa);
2969 	vmx_msr_guest_exit(vmx, vcpu);
2970 
2971 	VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0);
2972 	vmx->vmcs_state[vcpu] = VS_NONE;
2973 
2974 	return (0);
2975 }
2976 
2977 static void
2978 vmx_vmcleanup(void *arg)
2979 {
2980 	int i;
2981 	struct vmx *vmx = arg;
2982 	uint16_t maxcpus;
2983 
2984 	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
2985 		(void) vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2986 		kmem_free(vmx->apic_access_page, PAGESIZE);
2987 	} else {
2988 		VERIFY3P(vmx->apic_access_page, ==, NULL);
2989 	}
2990 
2991 	vmx_msr_bitmap_destroy(vmx);
2992 
2993 	maxcpus = vm_get_maxcpus(vmx->vm);
2994 	for (i = 0; i < maxcpus; i++)
2995 		vpid_free(vmx->state[i].vpid);
2996 
2997 	kmem_free(vmx, sizeof (*vmx));
2998 }
2999 
3000 /*
3001  * Ensure that the VMCS for this vcpu is loaded.
3002  * Returns true if a VMCS load was required.
3003  */
3004 static bool
3005 vmx_vmcs_access_ensure(struct vmx *vmx, int vcpu)
3006 {
3007 	int hostcpu;
3008 
3009 	if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) {
3010 		if (hostcpu != curcpu) {
3011 			panic("unexpected vcpu migration %d != %d",
3012 			    hostcpu, curcpu);
3013 		}
3014 		/* Earlier logic already took care of the load */
3015 		return (false);
3016 	} else {
3017 		vmcs_load(vmx->vmcs_pa[vcpu]);
3018 		return (true);
3019 	}
3020 }
3021 
3022 static void
3023 vmx_vmcs_access_done(struct vmx *vmx, int vcpu)
3024 {
3025 	int hostcpu;
3026 
3027 	if (vcpu_is_running(vmx->vm, vcpu, &hostcpu)) {
3028 		if (hostcpu != curcpu) {
3029 			panic("unexpected vcpu migration %d != %d",
3030 			    hostcpu, curcpu);
3031 		}
3032 		/* Later logic will take care of the unload */
3033 	} else {
3034 		vmcs_clear(vmx->vmcs_pa[vcpu]);
3035 	}
3036 }
3037 
3038 static uint64_t *
3039 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
3040 {
3041 	switch (reg) {
3042 	case VM_REG_GUEST_RAX:
3043 		return (&vmxctx->guest_rax);
3044 	case VM_REG_GUEST_RBX:
3045 		return (&vmxctx->guest_rbx);
3046 	case VM_REG_GUEST_RCX:
3047 		return (&vmxctx->guest_rcx);
3048 	case VM_REG_GUEST_RDX:
3049 		return (&vmxctx->guest_rdx);
3050 	case VM_REG_GUEST_RSI:
3051 		return (&vmxctx->guest_rsi);
3052 	case VM_REG_GUEST_RDI:
3053 		return (&vmxctx->guest_rdi);
3054 	case VM_REG_GUEST_RBP:
3055 		return (&vmxctx->guest_rbp);
3056 	case VM_REG_GUEST_R8:
3057 		return (&vmxctx->guest_r8);
3058 	case VM_REG_GUEST_R9:
3059 		return (&vmxctx->guest_r9);
3060 	case VM_REG_GUEST_R10:
3061 		return (&vmxctx->guest_r10);
3062 	case VM_REG_GUEST_R11:
3063 		return (&vmxctx->guest_r11);
3064 	case VM_REG_GUEST_R12:
3065 		return (&vmxctx->guest_r12);
3066 	case VM_REG_GUEST_R13:
3067 		return (&vmxctx->guest_r13);
3068 	case VM_REG_GUEST_R14:
3069 		return (&vmxctx->guest_r14);
3070 	case VM_REG_GUEST_R15:
3071 		return (&vmxctx->guest_r15);
3072 	case VM_REG_GUEST_CR2:
3073 		return (&vmxctx->guest_cr2);
3074 	case VM_REG_GUEST_DR0:
3075 		return (&vmxctx->guest_dr0);
3076 	case VM_REG_GUEST_DR1:
3077 		return (&vmxctx->guest_dr1);
3078 	case VM_REG_GUEST_DR2:
3079 		return (&vmxctx->guest_dr2);
3080 	case VM_REG_GUEST_DR3:
3081 		return (&vmxctx->guest_dr3);
3082 	case VM_REG_GUEST_DR6:
3083 		return (&vmxctx->guest_dr6);
3084 	default:
3085 		break;
3086 	}
3087 	return (NULL);
3088 }
3089 
3090 static int
3091 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
3092 {
3093 	struct vmx *vmx = arg;
3094 	uint64_t *regp;
3095 
3096 	/* VMCS access not required for ctx reads */
3097 	if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) {
3098 		*retval = *regp;
3099 		return (0);
3100 	}
3101 
3102 	bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
3103 	int err = 0;
3104 
3105 	if (reg == VM_REG_GUEST_INTR_SHADOW) {
3106 		uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
3107 		*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
3108 	} else {
3109 		uint32_t encoding;
3110 
3111 		encoding = vmcs_field_encoding(reg);
3112 		switch (encoding) {
3113 		case VMCS_GUEST_CR0:
3114 			/* Take the shadow bits into account */
3115 			*retval = vmx_unshadow_cr0(vmcs_read(encoding),
3116 			    vmcs_read(VMCS_CR0_SHADOW));
3117 			break;
3118 		case VMCS_GUEST_CR4:
3119 			/* Take the shadow bits into account */
3120 			*retval = vmx_unshadow_cr4(vmcs_read(encoding),
3121 			    vmcs_read(VMCS_CR4_SHADOW));
3122 			break;
3123 		case VMCS_INVALID_ENCODING:
3124 			err = EINVAL;
3125 			break;
3126 		default:
3127 			*retval = vmcs_read(encoding);
3128 			break;
3129 		}
3130 	}
3131 
3132 	if (vmcs_loaded) {
3133 		vmx_vmcs_access_done(vmx, vcpu);
3134 	}
3135 	return (err);
3136 }
3137 
3138 static int
3139 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
3140 {
3141 	struct vmx *vmx = arg;
3142 	uint64_t *regp;
3143 
3144 	/* VMCS access not required for ctx writes */
3145 	if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) {
3146 		*regp = val;
3147 		return (0);
3148 	}
3149 
3150 	bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
3151 	int err = 0;
3152 
3153 	if (reg == VM_REG_GUEST_INTR_SHADOW) {
3154 		if (val != 0) {
3155 			/*
3156 			 * Forcing the vcpu into an interrupt shadow is not
3157 			 * presently supported.
3158 			 */
3159 			err = EINVAL;
3160 		} else {
3161 			uint64_t gi;
3162 
3163 			gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
3164 			gi &= ~HWINTR_BLOCKING;
3165 			vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
3166 			err = 0;
3167 		}
3168 	} else {
3169 		uint32_t encoding;
3170 
3171 		err = 0;
3172 		encoding = vmcs_field_encoding(reg);
3173 		switch (encoding) {
3174 		case VMCS_GUEST_IA32_EFER:
3175 			vmcs_write(encoding, val);
3176 			vmx_sync_efer_state(vmx, vcpu, val);
3177 			break;
3178 		case VMCS_GUEST_CR0:
3179 			/*
3180 			 * The guest is not allowed to modify certain bits in
3181 			 * %cr0 and %cr4.  To maintain the illusion of full
3182 			 * control, they have shadow versions which contain the
3183 			 * guest-perceived (via reads from the register) values
3184 			 * as opposed to the guest-effective values.
3185 			 *
3186 			 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6.
3187 			 */
3188 			vmcs_write(VMCS_CR0_SHADOW, val);
3189 			vmcs_write(encoding, vmx_fix_cr0(val));
3190 			break;
3191 		case VMCS_GUEST_CR4:
3192 			/* See above for detail on %cr4 shadowing */
3193 			vmcs_write(VMCS_CR4_SHADOW, val);
3194 			vmcs_write(encoding, vmx_fix_cr4(val));
3195 			break;
3196 		case VMCS_GUEST_CR3:
3197 			vmcs_write(encoding, val);
3198 			/*
3199 			 * Invalidate the guest vcpu's TLB mappings to emulate
3200 			 * the behavior of updating %cr3.
3201 			 *
3202 			 * XXX the processor retains global mappings when %cr3
3203 			 * is updated but vmx_invvpid() does not.
3204 			 */
3205 			vmx_invvpid(vmx, vcpu,
3206 			    vcpu_is_running(vmx->vm, vcpu, NULL));
3207 			break;
3208 		case VMCS_INVALID_ENCODING:
3209 			err = EINVAL;
3210 			break;
3211 		default:
3212 			vmcs_write(encoding, val);
3213 			break;
3214 		}
3215 	}
3216 
3217 	if (vmcs_loaded) {
3218 		vmx_vmcs_access_done(vmx, vcpu);
3219 	}
3220 	return (err);
3221 }
3222 
3223 static int
3224 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc)
3225 {
3226 	struct vmx *vmx = arg;
3227 	uint32_t base, limit, access;
3228 
3229 	bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
3230 
3231 	vmcs_seg_desc_encoding(seg, &base, &limit, &access);
3232 	desc->base = vmcs_read(base);
3233 	desc->limit = vmcs_read(limit);
3234 	if (access != VMCS_INVALID_ENCODING) {
3235 		desc->access = vmcs_read(access);
3236 	} else {
3237 		desc->access = 0;
3238 	}
3239 
3240 	if (vmcs_loaded) {
3241 		vmx_vmcs_access_done(vmx, vcpu);
3242 	}
3243 	return (0);
3244 }
3245 
3246 static int
3247 vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc)
3248 {
3249 	struct vmx *vmx = arg;
3250 	uint32_t base, limit, access;
3251 
3252 	bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
3253 
3254 	vmcs_seg_desc_encoding(seg, &base, &limit, &access);
3255 	vmcs_write(base, desc->base);
3256 	vmcs_write(limit, desc->limit);
3257 	if (access != VMCS_INVALID_ENCODING) {
3258 		vmcs_write(access, desc->access);
3259 	}
3260 
3261 	if (vmcs_loaded) {
3262 		vmx_vmcs_access_done(vmx, vcpu);
3263 	}
3264 	return (0);
3265 }
3266 
3267 static uint64_t *
3268 vmx_msr_ptr(struct vmx *vmx, int vcpu, uint32_t msr)
3269 {
3270 	uint64_t *guest_msrs = vmx->guest_msrs[vcpu];
3271 
3272 	switch (msr) {
3273 	case MSR_LSTAR:
3274 		return (&guest_msrs[IDX_MSR_LSTAR]);
3275 	case MSR_CSTAR:
3276 		return (&guest_msrs[IDX_MSR_CSTAR]);
3277 	case MSR_STAR:
3278 		return (&guest_msrs[IDX_MSR_STAR]);
3279 	case MSR_SF_MASK:
3280 		return (&guest_msrs[IDX_MSR_SF_MASK]);
3281 	case MSR_KGSBASE:
3282 		return (&guest_msrs[IDX_MSR_KGSBASE]);
3283 	case MSR_PAT:
3284 		return (&guest_msrs[IDX_MSR_PAT]);
3285 	default:
3286 		return (NULL);
3287 	}
3288 }
3289 
3290 static int
3291 vmx_msr_get(void *arg, int vcpu, uint32_t msr, uint64_t *valp)
3292 {
3293 	struct vmx *vmx = arg;
3294 
3295 	ASSERT(valp != NULL);
3296 
3297 	const uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr);
3298 	if (msrp != NULL) {
3299 		*valp = *msrp;
3300 		return (0);
3301 	}
3302 
3303 	const uint32_t vmcs_enc = vmcs_msr_encoding(msr);
3304 	if (vmcs_enc != VMCS_INVALID_ENCODING) {
3305 		bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
3306 
3307 		*valp = vmcs_read(vmcs_enc);
3308 
3309 		if (vmcs_loaded) {
3310 			vmx_vmcs_access_done(vmx, vcpu);
3311 		}
3312 		return (0);
3313 	}
3314 
3315 	return (EINVAL);
3316 }
3317 
3318 static int
3319 vmx_msr_set(void *arg, int vcpu, uint32_t msr, uint64_t val)
3320 {
3321 	struct vmx *vmx = arg;
3322 
3323 	/* TODO: mask value */
3324 
3325 	uint64_t *msrp = vmx_msr_ptr(vmx, vcpu, msr);
3326 	if (msrp != NULL) {
3327 		*msrp = val;
3328 		return (0);
3329 	}
3330 
3331 	const uint32_t vmcs_enc = vmcs_msr_encoding(msr);
3332 	if (vmcs_enc != VMCS_INVALID_ENCODING) {
3333 		bool vmcs_loaded = vmx_vmcs_access_ensure(vmx, vcpu);
3334 
3335 		vmcs_write(vmcs_enc, val);
3336 
3337 		if (msr == MSR_EFER) {
3338 			vmx_sync_efer_state(vmx, vcpu, val);
3339 		}
3340 
3341 		if (vmcs_loaded) {
3342 			vmx_vmcs_access_done(vmx, vcpu);
3343 		}
3344 		return (0);
3345 	}
3346 	return (EINVAL);
3347 }
3348 
3349 static int
3350 vmx_getcap(void *arg, int vcpu, int type, int *retval)
3351 {
3352 	struct vmx *vmx = arg;
3353 	int vcap;
3354 	int ret;
3355 
3356 	ret = ENOENT;
3357 
3358 	vcap = vmx->cap[vcpu].set;
3359 
3360 	switch (type) {
3361 	case VM_CAP_HALT_EXIT:
3362 		ret = 0;
3363 		break;
3364 	case VM_CAP_PAUSE_EXIT:
3365 		if (cap_pause_exit)
3366 			ret = 0;
3367 		break;
3368 	case VM_CAP_MTRAP_EXIT:
3369 		if (cap_monitor_trap)
3370 			ret = 0;
3371 		break;
3372 	case VM_CAP_ENABLE_INVPCID:
3373 		if (cap_invpcid)
3374 			ret = 0;
3375 		break;
3376 	case VM_CAP_BPT_EXIT:
3377 		ret = 0;
3378 		break;
3379 	default:
3380 		break;
3381 	}
3382 
3383 	if (ret == 0)
3384 		*retval = (vcap & (1 << type)) ? 1 : 0;
3385 
3386 	return (ret);
3387 }
3388 
3389 static int
3390 vmx_setcap(void *arg, int vcpu, int type, int val)
3391 {
3392 	struct vmx *vmx = arg;
3393 	uint32_t baseval, reg, flag;
3394 	uint32_t *pptr;
3395 	int error;
3396 
3397 	error = ENOENT;
3398 	pptr = NULL;
3399 
3400 	switch (type) {
3401 	case VM_CAP_HALT_EXIT:
3402 		error = 0;
3403 		pptr = &vmx->cap[vcpu].proc_ctls;
3404 		baseval = *pptr;
3405 		flag = PROCBASED_HLT_EXITING;
3406 		reg = VMCS_PRI_PROC_BASED_CTLS;
3407 		break;
3408 	case VM_CAP_MTRAP_EXIT:
3409 		if (cap_monitor_trap) {
3410 			error = 0;
3411 			pptr = &vmx->cap[vcpu].proc_ctls;
3412 			baseval = *pptr;
3413 			flag = PROCBASED_MTF;
3414 			reg = VMCS_PRI_PROC_BASED_CTLS;
3415 		}
3416 		break;
3417 	case VM_CAP_PAUSE_EXIT:
3418 		if (cap_pause_exit) {
3419 			error = 0;
3420 			pptr = &vmx->cap[vcpu].proc_ctls;
3421 			baseval = *pptr;
3422 			flag = PROCBASED_PAUSE_EXITING;
3423 			reg = VMCS_PRI_PROC_BASED_CTLS;
3424 		}
3425 		break;
3426 	case VM_CAP_ENABLE_INVPCID:
3427 		if (cap_invpcid) {
3428 			error = 0;
3429 			pptr = &vmx->cap[vcpu].proc_ctls2;
3430 			baseval = *pptr;
3431 			flag = PROCBASED2_ENABLE_INVPCID;
3432 			reg = VMCS_SEC_PROC_BASED_CTLS;
3433 		}
3434 		break;
3435 	case VM_CAP_BPT_EXIT:
3436 		error = 0;
3437 
3438 		/* Don't change the bitmap if we are tracing all exceptions. */
3439 		if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) {
3440 			pptr = &vmx->cap[vcpu].exc_bitmap;
3441 			baseval = *pptr;
3442 			flag = (1 << IDT_BP);
3443 			reg = VMCS_EXCEPTION_BITMAP;
3444 		}
3445 		break;
3446 	default:
3447 		break;
3448 	}
3449 
3450 	if (error != 0) {
3451 		return (error);
3452 	}
3453 
3454 	if (pptr != NULL) {
3455 		if (val) {
3456 			baseval |= flag;
3457 		} else {
3458 			baseval &= ~flag;
3459 		}
3460 		vmcs_load(vmx->vmcs_pa[vcpu]);
3461 		vmcs_write(reg, baseval);
3462 		vmcs_clear(vmx->vmcs_pa[vcpu]);
3463 
3464 		/*
3465 		 * Update the locally stored copy of the control field to
3466 		 * reflect the new setting.
3467 		 */
3468 		*pptr = baseval;
3469 	}
3470 
3471 	if (val) {
3472 		vmx->cap[vcpu].set |= (1 << type);
3473 	} else {
3474 		vmx->cap[vcpu].set &= ~(1 << type);
3475 	}
3476 
3477 	return (0);
3478 }
3479 
3480 struct vlapic_vtx {
3481 	struct vlapic	vlapic;
3482 
3483 	/* Align to the nearest cacheline */
3484 	uint8_t		_pad[64 - (sizeof (struct vlapic) % 64)];
3485 
3486 	/* TMR handling state for posted interrupts */
3487 	uint32_t	tmr_active[8];
3488 	uint32_t	pending_level[8];
3489 	uint32_t	pending_edge[8];
3490 
3491 	struct pir_desc	*pir_desc;
3492 	struct vmx	*vmx;
3493 	uint_t	pending_prio;
3494 	boolean_t	tmr_sync;
3495 };
3496 
3497 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0);
3498 
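/* Map a vector's priority class (bits 7:4) to a bit in 'pending_prio'. */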
3499 #define	VPR_PRIO_BIT(vpr)	(1 << ((vpr) >> 4))
3500 
3501 static vcpu_notify_t
3502 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level)
3503 {
3504 	struct vlapic_vtx *vlapic_vtx;
3505 	struct pir_desc *pir_desc;
3506 	uint32_t mask, tmrval;
3507 	int idx;
3508 	vcpu_notify_t notify = VCPU_NOTIFY_NONE;
3509 
3510 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3511 	pir_desc = vlapic_vtx->pir_desc;
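	/* 256 vectors are tracked as 8 32-bit words: select the word and bit. */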
3512 	idx = vector / 32;
3513 	mask = 1UL << (vector % 32);
3514 
3515 	/*
3516 	 * If the currently asserted TMRs do not match the state requested by
3517 	 * the incoming interrupt, an exit will be required to reconcile those
3518 	 * bits in the APIC page.  This will keep the vLAPIC behavior in line
3519 	 * with the architecturally defined expectations.
3520 	 *
3521 	 * If actors of mixed types (edge and level) are racing against the same
3522 	 * vector (toggling its TMR bit back and forth), the results could be
3523 	 * inconsistent.  Such circumstances are considered a rare edge case and
3524 	 * are never expected to be found in the wild.
3525 	 */
3526 	tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]);
3527 	if (!level) {
3528 		if ((tmrval & mask) != 0) {
3529 			/* Edge-triggered interrupt needs TMR de-asserted */
3530 			atomic_set_int(&vlapic_vtx->pending_edge[idx], mask);
3531 			atomic_store_rel_long(&pir_desc->pending, 1);
3532 			return (VCPU_NOTIFY_EXIT);
3533 		}
3534 	} else {
3535 		if ((tmrval & mask) == 0) {
3536 			/* Level-triggered interrupt needs TMR asserted */
3537 			atomic_set_int(&vlapic_vtx->pending_level[idx], mask);
3538 			atomic_store_rel_long(&pir_desc->pending, 1);
3539 			return (VCPU_NOTIFY_EXIT);
3540 		}
3541 	}
3542 
3543 	/*
3544 	 * If the interrupt request does not require manipulation of the TMRs
3545 	 * for delivery, set it in the PIR descriptor.  It cannot be inserted into
3546 	 * the APIC page while the vCPU might be running.
3547 	 */
3548 	atomic_set_int(&pir_desc->pir[idx], mask);
3549 
3550 	/*
3551 	 * A notification is required whenever the 'pending' bit makes a
3552 	 * transition from 0->1.
3553 	 *
3554 	 * Even if the 'pending' bit is already asserted, notification about
3555 	 * the incoming interrupt may still be necessary.  For example, if a
3556 	 * vCPU is HLTed with a high PPR, a low priority interrupt would cause
3557 	 * the 0->1 'pending' transition with a notification, but the vCPU
3558 	 * would ignore the interrupt for the time being.  The same vCPU would
3559 	 * need to then be notified if a high-priority interrupt arrived which
3560 	 * satisfied the PPR.
3561 	 *
3562 	 * The priorities of interrupts injected while 'pending' is asserted
3563 	 * are tracked in a custom bitfield 'pending_prio'.  Should the
3564 	 * to-be-injected interrupt exceed the priorities already present, the
3565 	 * notification is sent.  The priorities recorded in 'pending_prio' are
3566 	 * cleared whenever the 'pending' bit makes another 0->1 transition.
3567 	 */
3568 	if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) {
3569 		notify = VCPU_NOTIFY_APIC;
3570 		vlapic_vtx->pending_prio = 0;
3571 	} else {
3572 		const uint_t old_prio = vlapic_vtx->pending_prio;
3573 		const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
3574 
3575 		if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) {
3576 			atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
3577 			notify = VCPU_NOTIFY_APIC;
3578 		}
3579 	}
3580 
3581 	return (notify);
3582 }
3583 
3584 static void
3585 vmx_apicv_accepted(struct vlapic *vlapic, int vector)
3586 {
3587 	/*
3588 	 * When APICv is enabled for an instance, the traditional interrupt
3589 	 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not
3590 	 * used and the CPU does the heavy lifting of virtual interrupt
3591 	 * delivery.  For that reason vmx_intr_accepted() should never be called
3592 	 * when APICv is enabled.
3593 	 */
3594 	panic("vmx_intr_accepted: not expected to be called");
3595 }
3596 
3597 static void
3598 vmx_apicv_sync_tmr(struct vlapic *vlapic)
3599 {
3600 	struct vlapic_vtx *vlapic_vtx;
3601 	const uint32_t *tmrs;
3602 
3603 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3604 	tmrs = &vlapic_vtx->tmr_active[0];
3605 
3606 	if (!vlapic_vtx->tmr_sync) {
3607 		return;
3608 	}
3609 
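	/*
	 * Mirror the active TMR bits into the EOI-exit bitmap so that EOIs for
	 * level-triggered vectors cause VM-exits (see the handling of
	 * EXIT_REASON_VIRTUALIZED_EOI above).
	 */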
3610 	vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]);
3611 	vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]);
3612 	vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]);
3613 	vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]);
3614 	vlapic_vtx->tmr_sync = B_FALSE;
3615 }
3616 
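/*
 * Switch a vCPU using only TPR shadowing (no APICv) into x2APIC mode: the TPR
 * shadow is disabled and CR8 load/store exiting is enabled so TPR accesses
 * trap to the VMM instead.  (Descriptive note inferred from the control bits
 * adjusted below.)
 */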
3617 static void
3618 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic)
3619 {
3620 	struct vmx *vmx;
3621 	uint32_t proc_ctls;
3622 	int vcpuid;
3623 
3624 	vcpuid = vlapic->vcpuid;
3625 	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3626 
3627 	proc_ctls = vmx->cap[vcpuid].proc_ctls;
3628 	proc_ctls &= ~PROCBASED_USE_TPR_SHADOW;
3629 	proc_ctls |= PROCBASED_CR8_LOAD_EXITING;
3630 	proc_ctls |= PROCBASED_CR8_STORE_EXITING;
3631 	vmx->cap[vcpuid].proc_ctls = proc_ctls;
3632 
3633 	vmcs_load(vmx->vmcs_pa[vcpuid]);
3634 	vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
3635 	vmcs_clear(vmx->vmcs_pa[vcpuid]);
3636 }
3637 
3638 static void
3639 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic)
3640 {
3641 	struct vmx *vmx;
3642 	uint32_t proc_ctls2;
3643 	int vcpuid;
3644 
3645 	vcpuid = vlapic->vcpuid;
3646 	vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3647 
3648 	proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3649 	KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3650 	    ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2));
3651 
3652 	proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3653 	proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3654 	vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3655 
3656 	vmcs_load(vmx->vmcs_pa[vcpuid]);
3657 	vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3658 	vmcs_clear(vmx->vmcs_pa[vcpuid]);
3659 
3660 	vmx_allow_x2apic_msrs(vmx, vcpuid);
3661 }
3662 
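/*
 * Posted-interrupt notification hook: send the posted-interrupt IPI to the
 * host CPU on which the target vCPU is currently running.
 */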
3663 static void
3664 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu)
3665 {
3666 	psm_send_pir_ipi(hostcpu);
3667 }
3668 
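/*
 * Fold interrupts staged in the PIR descriptor and in the pending_level/
 * pending_edge arrays into the virtual APIC page (IRR, plus TMR adjustments)
 * ahead of guest entry.
 */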
3669 static void
3670 vmx_apicv_sync(struct vlapic *vlapic)
3671 {
3672 	struct vlapic_vtx *vlapic_vtx;
3673 	struct pir_desc *pir_desc;
3674 	struct LAPIC *lapic;
3675 	uint_t i;
3676 
3677 	vlapic_vtx = (struct vlapic_vtx *)vlapic;
3678 	pir_desc = vlapic_vtx->pir_desc;
3679 	lapic = vlapic->apic_page;
3680 
3681 	if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3682 		return;
3683 	}
3684 
3685 	vlapic_vtx->pending_prio = 0;
3686 
3687 	/* Make sure the invalid (0-15) vectors are not set */
3688 	ASSERT0(vlapic_vtx->pending_level[0] & 0xffff);
3689 	ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff);
3690 	ASSERT0(pir_desc->pir[0] & 0xffff);
3691 
3692 	for (i = 0; i <= 7; i++) {
3693 		uint32_t *tmrp = &lapic->tmr0 + (i * 4);
3694 		uint32_t *irrp = &lapic->irr0 + (i * 4);
3695 
3696 		const uint32_t pending_level =
3697 		    atomic_readandclear_int(&vlapic_vtx->pending_level[i]);
3698 		const uint32_t pending_edge =
3699 		    atomic_readandclear_int(&vlapic_vtx->pending_edge[i]);
3700 		const uint32_t pending_inject =
3701 		    atomic_readandclear_int(&pir_desc->pir[i]);
3702 
3703 		if (pending_level != 0) {
3704 			/*
3705 			 * Level-triggered interrupts assert their corresponding
3706 			 * bit in the TMR when queued in IRR.
3707 			 */
3708 			*tmrp |= pending_level;
3709 			*irrp |= pending_level;
3710 		}
3711 		if (pending_edge != 0) {
3712 			/*
3713 			 * When queuing an edge-triggered interrupt in IRR, the
3714 			 * corresponding bit in the TMR is cleared.
3715 			 */
3716 			*tmrp &= ~pending_edge;
3717 			*irrp |= pending_edge;
3718 		}
3719 		if (pending_inject != 0) {
3720 			/*
3721 			 * Interrupts which do not require a change to the TMR
3722 			 * (because it already matches the necessary state) can
3723 			 * simply be queued in IRR.
3724 			 */
3725 			*irrp |= pending_inject;
3726 		}
3727 
3728 		if (*tmrp != vlapic_vtx->tmr_active[i]) {
3729 			/* Check if VMX EOI triggers require updating. */
3730 			vlapic_vtx->tmr_active[i] = *tmrp;
3731 			vlapic_vtx->tmr_sync = B_TRUE;
3732 		}
3733 	}
3734 }
3735 
3736 static void
3737 vmx_tpr_shadow_enter(struct vlapic *vlapic)
3738 {
3739 	/*
3740 	 * When TPR shadowing is enabled, VMX will initiate a guest exit if its
3741 	 * TPR falls below a threshold priority.  That threshold is set to the
3742 	 * current TPR priority, since guest interrupt status should be
3743 	 * re-evaluated if its TPR is set lower.
3744 	 */
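	/*
	 * Worked example (assuming vlapic_get_cr8() returns the TPR priority
	 * class, TPR[7:4]): with CR8 currently 8, the threshold becomes 8; if
	 * the guest later lowers its TPR class below 8, a TPR-below-threshold
	 * exit occurs and pending lower-priority interrupts are re-evaluated.
	 */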
3745 	vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic));
3746 }
3747 
3748 static void
3749 vmx_tpr_shadow_exit(struct vlapic *vlapic)
3750 {
3751 	/*
3752 	 * Unlike full APICv, where changes to the TPR are reflected in the PPR,
3753 	 * with TPR shadowing, that duty is relegated to the VMM.  Upon exit,
3754 	 * the PPR is updated to reflect any change in the TPR here.
3755 	 */
3756 	vlapic_sync_tpr(vlapic);
3757 }
3758 
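/*
 * Allocate and initialize the VMX-specific vlapic for a vCPU, installing the
 * optional ops according to which hardware assists (TPR shadowing, APICv,
 * posted interrupts) are enabled on this vmx instance.
 */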
3759 static struct vlapic *
3760 vmx_vlapic_init(void *arg, int vcpuid)
3761 {
3762 	struct vmx *vmx = arg;
3763 	struct vlapic_vtx *vlapic_vtx;
3764 	struct vlapic *vlapic;
3765 
3766 	vlapic_vtx = kmem_zalloc(sizeof (struct vlapic_vtx), KM_SLEEP);
3767 	vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3768 	vlapic_vtx->vmx = vmx;
3769 
3770 	vlapic = &vlapic_vtx->vlapic;
3771 	vlapic->vm = vmx->vm;
3772 	vlapic->vcpuid = vcpuid;
3773 	vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3774 
3775 	if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
3776 		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts;
3777 	}
3778 	if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
3779 		vlapic->ops.set_intr_ready = vmx_apicv_set_ready;
3780 		vlapic->ops.sync_state = vmx_apicv_sync;
3781 		vlapic->ops.intr_accepted = vmx_apicv_accepted;
3782 		vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid;
3783 
3784 		if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
3785 			vlapic->ops.post_intr = vmx_apicv_notify;
3786 		}
3787 	}
3788 
3789 	vlapic_init(vlapic);
3790 
3791 	return (vlapic);
3792 }
3793 
3794 static void
3795 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3796 {
3797 	vlapic_cleanup(vlapic);
3798 	kmem_free(vlapic, sizeof (struct vlapic_vtx));
3799 }
3800 
3801 static void
3802 vmx_pause(void *arg, int vcpuid)
3803 {
3804 	struct vmx *vmx = arg;
3805 
3806 	VERIFY(vmx_vmcs_access_ensure(vmx, vcpuid));
3807 
3808 	/* Stash any interrupt/exception pending injection. */
3809 	vmx_stash_intinfo(vmx, vcpuid);
3810 
3811 	/*
3812 	 * Now that no event is awaiting injection, interrupt-window exiting and
3813 	 * NMI-window exiting can be disabled.  If/when this vCPU is made to run
3814 	 * again, those conditions will be reinstated when the now-queued events
3815 	 * are re-injected.
3816 	 */
3817 	vmx_clear_nmi_window_exiting(vmx, vcpuid);
3818 	vmx_clear_int_window_exiting(vmx, vcpuid);
3819 
3820 	vmx_vmcs_access_done(vmx, vcpuid);
3821 }
3822 
3823 static void
3824 vmx_savectx(void *arg, int vcpu)
3825 {
3826 	struct vmx *vmx = arg;
3827 
3828 	if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
3829 		vmcs_clear(vmx->vmcs_pa[vcpu]);
3830 		vmx_msr_guest_exit(vmx, vcpu);
3831 		/*
3832 		 * With the VMCS now VMCLEARed, it can no longer be re-entered
3833 		 * with VMRESUME, but must be VMLAUNCHed again.
3834 		 */
3835 		vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED;
3836 	}
3837 
3838 	reset_gdtr_limit();
3839 }
3840 
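/*
 * Counterpart to vmx_savectx(): when the vCPU thread is placed back on a host
 * CPU, guest MSR state is re-entered and the VMCS re-loaded (which, having
 * been VMCLEARed on save, must next be entered via VMLAUNCH).
 */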
3841 static void
3842 vmx_restorectx(void *arg, int vcpu)
3843 {
3844 	struct vmx *vmx = arg;
3845 
3846 	ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED);
3847 
3848 	if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
3849 		vmx_msr_guest_enter(vmx, vcpu);
3850 		vmcs_load(vmx->vmcs_pa[vcpu]);
3851 	}
3852 }
3853 
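/* Entry points of the Intel/VMX backend, consumed by the common vmm code. */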
3854 struct vmm_ops vmm_ops_intel = {
3855 	.init		= vmx_init,
3856 	.cleanup	= vmx_cleanup,
3857 	.resume		= vmx_restore,
3858 
3859 	.vminit		= vmx_vminit,
3860 	.vmrun		= vmx_run,
3861 	.vmcleanup	= vmx_vmcleanup,
3862 	.vmgetreg	= vmx_getreg,
3863 	.vmsetreg	= vmx_setreg,
3864 	.vmgetdesc	= vmx_getdesc,
3865 	.vmsetdesc	= vmx_setdesc,
3866 	.vmgetcap	= vmx_getcap,
3867 	.vmsetcap	= vmx_setcap,
3868 	.vlapic_init	= vmx_vlapic_init,
3869 	.vlapic_cleanup	= vmx_vlapic_cleanup,
3870 	.vmpause	= vmx_pause,
3871 
3872 	.vmsavectx	= vmx_savectx,
3873 	.vmrestorectx	= vmx_restorectx,
3874 
3875 	.vmgetmsr	= vmx_msr_get,
3876 	.vmsetmsr	= vmx_msr_set,
3877 };
3878 
3879 /* Side-effect-free HW validation derived from checks in vmx_init. */
3880 int
3881 vmx_x86_supported(const char **msg)
3882 {
3883 	int error;
3884 	uint32_t tmp;
3885 
3886 	ASSERT(msg != NULL);
3887 
3888 	/* Check support for primary processor-based VM-execution controls */
3889 	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
3890 	    MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING,
3891 	    PROCBASED_CTLS_ZERO_SETTING, &tmp);
3892 	if (error) {
3893 		*msg = "processor does not support desired primary "
3894 		    "processor-based controls";
3895 		return (error);
3896 	}
3897 
3898 	/* Check support for secondary processor-based VM-execution controls */
3899 	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
3900 	    MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING,
3901 	    PROCBASED_CTLS2_ZERO_SETTING, &tmp);
3902 	if (error) {
3903 		*msg = "processor does not support desired secondary "
3904 		    "processor-based controls";
3905 		return (error);
3906 	}
3907 
3908 	/* Check support for pin-based VM-execution controls */
3909 	error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
3910 	    MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING,
3911 	    PINBASED_CTLS_ZERO_SETTING, &tmp);
3912 	if (error) {
3913 		*msg = "processor does not support desired pin-based controls";
3914 		return (error);
3915 	}
3916 
3917 	/* Check support for VM-exit controls */
3918 	error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
3919 	    VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp);
3920 	if (error) {
3921 		*msg = "processor does not support desired exit controls";
3922 		return (error);
3923 	}
3924 
3925 	/* Check support for VM-entry controls */
3926 	error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
3927 	    VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp);
3928 	if (error) {
3929 		*msg = "processor does not support desired entry controls";
3930 		return (error);
3931 	}
3932 
3933 	/* Unrestricted guest is nominally optional, but not for us. */
3934 	error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
3935 	    PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp);
3936 	if (error) {
3937 		*msg = "processor does not support desired unrestricted guest "
3938 		    "controls";
3939 		return (error);
3940 	}
3941 
3942 	return (0);
3943 }
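/*
 * Illustrative usage sketch (hypothetical caller, not present in this file):
 *
 *	const char *msg = NULL;
 *	if (vmx_x86_supported(&msg) != 0)
 *		cmn_err(CE_NOTE, "VMX not usable: %s", msg);
 */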
3944