xref: /illumos-gate/usr/src/uts/intel/io/vmm/amd/svm.c (revision d9be5d44)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * This file and its contents are supplied under the terms of the
31  * Common Development and Distribution License ("CDDL"), version 1.0.
32  * You may only use this file in accordance with the terms of version
33  * 1.0 of the CDDL.
34  *
35  * A full copy of the text of the CDDL should have accompanied this
36  * source.  A copy of the CDDL is also available via the Internet at
37  * http://www.illumos.org/license/CDDL.
38  *
39  * Copyright 2018 Joyent, Inc.
40  * Copyright 2022 Oxide Computer Company
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/kmem.h>
50 #include <sys/pcpu.h>
51 #include <sys/proc.h>
52 #include <sys/sysctl.h>
53 
54 #include <sys/x86_archext.h>
55 #include <sys/trap.h>
56 
57 #include <machine/cpufunc.h>
58 #include <machine/psl.h>
59 #include <machine/md_var.h>
60 #include <machine/reg.h>
61 #include <machine/specialreg.h>
62 #include <machine/vmm.h>
63 #include <machine/vmm_dev.h>
64 #include <sys/vmm_instruction_emul.h>
65 #include <sys/vmm_vm.h>
66 #include <sys/vmm_kernel.h>
67 
68 #include "vmm_lapic.h"
69 #include "vmm_stat.h"
70 #include "vmm_ioport.h"
71 #include "vatpic.h"
72 #include "vlapic.h"
73 #include "vlapic_priv.h"
74 
75 #include "vmcb.h"
76 #include "svm.h"
77 #include "svm_softc.h"
78 #include "svm_msr.h"
79 
80 SYSCTL_DECL(_hw_vmm);
81 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
82     NULL);
83 
84 /*
85  * SVM CPUID function 0x8000_000A, edx bit decoding.
86  */
87 #define	AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
88 #define	AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
89 #define	AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
90 #define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
91 #define	AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
92 #define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
93 #define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
94 #define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
95 #define	AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
96 #define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
97 #define	AMD_CPUID_SVM_AVIC		BIT(13)	/* AVIC present */
98 
99 #define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
100 				VMCB_CACHE_IOPM		|	\
101 				VMCB_CACHE_I		|	\
102 				VMCB_CACHE_TPR		|	\
103 				VMCB_CACHE_CR2		|	\
104 				VMCB_CACHE_CR		|	\
105 				VMCB_CACHE_DR		|	\
106 				VMCB_CACHE_DT		|	\
107 				VMCB_CACHE_SEG		|	\
108 				VMCB_CACHE_NP)
109 
110 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
111 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
112     0, NULL);
113 
114 /* SVM features advertised by CPUID.8000000AH:EDX */
115 static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
116 
117 static int disable_npf_assist;
118 
119 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
120 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
121 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
122 
123 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
124 static int svm_getreg(void *arg, int vcpu, int ident, uint64_t *val);
125 static void flush_asid(struct svm_softc *sc, int vcpuid);
126 
127 static __inline bool
128 flush_by_asid(void)
129 {
130 	return ((svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID) != 0);
131 }
132 
133 static __inline bool
134 decode_assist(void)
135 {
136 	return ((svm_feature & AMD_CPUID_SVM_DECODE_ASSIST) != 0);
137 }
138 
139 static int
140 svm_cleanup(void)
141 {
142 	/* This is taken care of by the hma registration */
143 	return (0);
144 }
145 
146 static int
147 svm_init(void)
148 {
149 	vmcb_clean &= VMCB_CACHE_DEFAULT;
150 
151 	svm_msr_init();
152 
153 	return (0);
154 }
155 
156 static void
157 svm_restore(void)
158 {
159 	/* No-op on illumos */
160 }
161 
162 /* Pentium compatible MSRs */
163 #define	MSR_PENTIUM_START	0
164 #define	MSR_PENTIUM_END		0x1FFF
165 /* AMD 6th generation and Intel compatible MSRs */
166 #define	MSR_AMD6TH_START	0xC0000000UL
167 #define	MSR_AMD6TH_END		0xC0001FFFUL
168 /* AMD 7th and 8th generation compatible MSRs */
169 #define	MSR_AMD7TH_START	0xC0010000UL
170 #define	MSR_AMD7TH_END		0xC0011FFFUL
171 
172 /*
173  * Get the index and bit position for a MSR in permission bitmap.
174  * Two bits are used for each MSR: lower bit for read and higher bit for write.
175  */
176 static int
177 svm_msr_index(uint64_t msr, int *index, int *bit)
178 {
179 	uint32_t base, off;
180 
181 	*index = -1;
182 	*bit = (msr % 4) * 2;
183 	base = 0;
184 
185 	if (msr <= MSR_PENTIUM_END) {
186 		*index = msr / 4;
187 		return (0);
188 	}
189 
190 	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
191 	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
192 		off = (msr - MSR_AMD6TH_START);
193 		*index = (off + base) / 4;
194 		return (0);
195 	}
196 
197 	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
198 	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
199 		off = (msr - MSR_AMD7TH_START);
200 		*index = (off + base) / 4;
201 		return (0);
202 	}
203 
204 	return (EINVAL);
205 }
206 
207 /*
208  * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
209  */
210 static void
211 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
212 {
213 	int index, bit, error;
214 
215 	error = svm_msr_index(msr, &index, &bit);
216 	KASSERT(error == 0, ("%s: invalid msr %lx", __func__, msr));
217 	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
218 	    ("%s: invalid index %d for msr %lx", __func__, index, msr));
219 	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
220 	    "msr %lx", __func__, bit, msr));
221 
222 	if (read)
223 		perm_bitmap[index] &= ~(1UL << bit);
224 
225 	if (write)
226 		perm_bitmap[index] &= ~(2UL << bit);
227 }
228 
229 static void
230 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
231 {
232 
233 	svm_msr_perm(perm_bitmap, msr, true, true);
234 }
235 
236 static void
237 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
238 {
239 
240 	svm_msr_perm(perm_bitmap, msr, true, false);
241 }
242 
243 static __inline int
244 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
245 {
246 	struct vmcb_ctrl *ctrl;
247 
248 	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
249 
250 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
251 	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
252 }
253 
254 static __inline void
255 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
256     int enabled)
257 {
258 	struct vmcb_ctrl *ctrl;
259 	uint32_t oldval;
260 
261 	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
262 
263 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
264 	oldval = ctrl->intercept[idx];
265 
266 	if (enabled)
267 		ctrl->intercept[idx] |= bitmask;
268 	else
269 		ctrl->intercept[idx] &= ~bitmask;
270 
271 	if (ctrl->intercept[idx] != oldval) {
272 		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
273 	}
274 }
275 
276 static __inline void
277 svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
278 {
279 
280 	svm_set_intercept(sc, vcpu, off, bitmask, 0);
281 }
282 
283 static __inline void
284 svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
285 {
286 
287 	svm_set_intercept(sc, vcpu, off, bitmask, 1);
288 }
289 
290 static void
291 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
292     uint64_t msrpm_base_pa, uint64_t np_pml4)
293 {
294 	struct vmcb_ctrl *ctrl;
295 	struct vmcb_state *state;
296 	uint32_t mask;
297 	int n;
298 
299 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
300 	state = svm_get_vmcb_state(sc, vcpu);
301 
302 	ctrl->iopm_base_pa = iopm_base_pa;
303 	ctrl->msrpm_base_pa = msrpm_base_pa;
304 
305 	/* Enable nested paging */
306 	ctrl->np_ctrl = NP_ENABLE;
307 	ctrl->n_cr3 = np_pml4;
308 
309 	/*
310 	 * Intercept accesses to the control registers that are not shadowed
311 	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
312 	 */
313 	for (n = 0; n < 16; n++) {
314 		mask = (BIT(n) << 16) | BIT(n);
315 		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
316 			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
317 		else
318 			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
319 	}
320 
321 	/*
322 	 * Selectively intercept writes to %cr0.  This triggers on operations
323 	 * which would change bits other than TS or MP.
324 	 */
325 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
326 	    VMCB_INTCPT_CR0_WRITE);
327 
328 	/*
329 	 * Intercept everything when tracing guest exceptions otherwise
330 	 * just intercept machine check exception.
331 	 */
332 	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
333 		for (n = 0; n < 32; n++) {
334 			/*
335 			 * Skip unimplemented vectors in the exception bitmap.
336 			 */
337 			if (n == 2 || n == 9) {
338 				continue;
339 			}
340 			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
341 		}
342 	} else {
343 		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
344 	}
345 
346 	/* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */
347 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
348 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
349 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
350 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
351 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
352 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
353 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
354 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
355 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
356 	    VMCB_INTCPT_FERR_FREEZE);
357 
358 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
359 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
360 
361 	/* Intercept privileged invalidation instructions. */
362 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
363 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
364 
365 	/*
366 	 * Intercept all virtualization-related instructions.
367 	 *
368 	 * From section "Canonicalization and Consistency Checks" in APMv2
369 	 * the VMRUN intercept bit must be set to pass the consistency check.
370 	 */
371 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
372 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMMCALL);
373 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
374 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
375 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
376 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
377 	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
378 
379 	/*
380 	 * The ASID will be set to a non-zero value just before VMRUN.
381 	 */
382 	ctrl->asid = 0;
383 
384 	/*
385 	 * Section 15.21.1, Interrupt Masking in EFLAGS
386 	 * Section 15.21.2, Virtualizing APIC.TPR
387 	 *
388 	 * This must be set for %rflag and %cr8 isolation of guest and host.
389 	 */
390 	ctrl->v_intr_ctrl |= V_INTR_MASKING;
391 
392 	/* Enable Last Branch Record aka LBR for debugging */
393 	ctrl->misc_ctrl |= LBR_VIRT_ENABLE;
394 	state->dbgctl = BIT(0);
395 
396 	/* EFER_SVM must always be set when the guest is executing */
397 	state->efer = EFER_SVM;
398 
399 	/* Set up the PAT to power-on state */
400 	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
401 	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
402 	    PAT_VALUE(2, PAT_UNCACHED)		|
403 	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
404 	    PAT_VALUE(4, PAT_WRITE_BACK)	|
405 	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
406 	    PAT_VALUE(6, PAT_UNCACHED)		|
407 	    PAT_VALUE(7, PAT_UNCACHEABLE);
408 
409 	/* Set up DR6/7 to power-on state */
410 	state->dr6 = DBREG_DR6_RESERVED1;
411 	state->dr7 = DBREG_DR7_RESERVED1;
412 }
413 
414 /*
415  * Initialize a virtual machine.
416  */
417 static void *
418 svm_vminit(struct vm *vm)
419 {
420 	struct svm_softc *svm_sc;
421 	struct svm_vcpu *vcpu;
422 	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
423 	int i;
424 	uint16_t maxcpus;
425 
426 	svm_sc = kmem_zalloc(sizeof (*svm_sc), KM_SLEEP);
427 	VERIFY3U(((uintptr_t)svm_sc & PAGE_MASK),  ==,  0);
428 
429 	svm_sc->msr_bitmap = vmm_contig_alloc(SVM_MSR_BITMAP_SIZE);
430 	if (svm_sc->msr_bitmap == NULL)
431 		panic("contigmalloc of SVM MSR bitmap failed");
432 	svm_sc->iopm_bitmap = vmm_contig_alloc(SVM_IO_BITMAP_SIZE);
433 	if (svm_sc->iopm_bitmap == NULL)
434 		panic("contigmalloc of SVM IO bitmap failed");
435 
436 	svm_sc->vm = vm;
437 	svm_sc->nptp = vmspace_table_root(vm_get_vmspace(vm));
438 
439 	/*
440 	 * Intercept read and write accesses to all MSRs.
441 	 */
442 	memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);
443 
444 	/*
445 	 * Access to the following MSRs is redirected to the VMCB when the
446 	 * guest is executing. Therefore it is safe to allow the guest to
447 	 * read/write these MSRs directly without hypervisor involvement.
448 	 */
449 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
450 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
451 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
452 
453 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
454 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
455 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
456 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
457 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
458 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
459 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
460 	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
461 
462 	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
463 
464 	/*
465 	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
466 	 */
467 	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
468 
469 	/* Intercept access to all I/O ports. */
470 	memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);
471 
472 	iopm_pa = vtophys(svm_sc->iopm_bitmap);
473 	msrpm_pa = vtophys(svm_sc->msr_bitmap);
474 	pml4_pa = svm_sc->nptp;
475 	maxcpus = vm_get_maxcpus(svm_sc->vm);
476 	for (i = 0; i < maxcpus; i++) {
477 		vcpu = svm_get_vcpu(svm_sc, i);
478 		vcpu->nextrip = ~0;
479 		vcpu->lastcpu = NOCPU;
480 		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
481 		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
482 		svm_msr_guest_init(svm_sc, i);
483 	}
484 	return (svm_sc);
485 }
486 
487 /*
488  * Collateral for a generic SVM VM-exit.
489  */
490 static void
491 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
492 {
493 
494 	vme->exitcode = VM_EXITCODE_SVM;
495 	vme->u.svm.exitcode = code;
496 	vme->u.svm.exitinfo1 = info1;
497 	vme->u.svm.exitinfo2 = info2;
498 }
499 
500 static int
501 svm_cpl(struct vmcb_state *state)
502 {
503 
504 	/*
505 	 * From APMv2:
506 	 *   "Retrieve the CPL from the CPL field in the VMCB, not
507 	 *    from any segment DPL"
508 	 */
509 	return (state->cpl);
510 }
511 
512 static enum vm_cpu_mode
513 svm_vcpu_mode(struct vmcb *vmcb)
514 {
515 	struct vmcb_state *state;
516 
517 	state = &vmcb->state;
518 
519 	if (state->efer & EFER_LMA) {
520 		struct vmcb_segment *seg;
521 
522 		/*
523 		 * Section 4.8.1 for APM2, check if Code Segment has
524 		 * Long attribute set in descriptor.
525 		 */
526 		seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
527 		if (seg->attrib & VMCB_CS_ATTRIB_L)
528 			return (CPU_MODE_64BIT);
529 		else
530 			return (CPU_MODE_COMPATIBILITY);
531 	} else  if (state->cr0 & CR0_PE) {
532 		return (CPU_MODE_PROTECTED);
533 	} else {
534 		return (CPU_MODE_REAL);
535 	}
536 }
537 
538 static enum vm_paging_mode
539 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
540 {
541 
542 	if ((cr0 & CR0_PG) == 0)
543 		return (PAGING_MODE_FLAT);
544 	if ((cr4 & CR4_PAE) == 0)
545 		return (PAGING_MODE_32);
546 	if (efer & EFER_LME)
547 		return (PAGING_MODE_64);
548 	else
549 		return (PAGING_MODE_PAE);
550 }
551 
552 /*
553  * ins/outs utility routines
554  */
555 
556 static void
557 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
558 {
559 	struct vmcb_state *state;
560 
561 	state = &vmcb->state;
562 	paging->cr3 = state->cr3;
563 	paging->cpl = svm_cpl(state);
564 	paging->cpu_mode = svm_vcpu_mode(vmcb);
565 	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
566 	    state->efer);
567 }
568 
569 #define	UNHANDLED 0
570 
571 /*
572  * Handle guest I/O intercept.
573  */
574 static int
575 svm_handle_inout(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
576 {
577 	struct vmcb_ctrl *ctrl;
578 	struct vmcb_state *state;
579 	struct vm_inout *inout;
580 	struct vie *vie;
581 	uint64_t info1;
582 	struct vm_guest_paging paging;
583 
584 	state = svm_get_vmcb_state(svm_sc, vcpu);
585 	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
586 	inout = &vmexit->u.inout;
587 	info1 = ctrl->exitinfo1;
588 
589 	inout->bytes = (info1 >> 4) & 0x7;
590 	inout->flags = 0;
591 	inout->flags |= (info1 & BIT(0)) ? INOUT_IN : 0;
592 	inout->flags |= (info1 & BIT(3)) ? INOUT_REP : 0;
593 	inout->flags |= (info1 & BIT(2)) ? INOUT_STR : 0;
594 	inout->port = (uint16_t)(info1 >> 16);
595 	inout->eax = (uint32_t)(state->rax);
596 
597 	if ((inout->flags & INOUT_STR) != 0) {
598 		/*
599 		 * The effective segment number in EXITINFO1[12:10] is populated
600 		 * only if the processor has the DecodeAssist capability.
601 		 *
602 		 * This is not specified explicitly in APMv2 but can be verified
603 		 * empirically.
604 		 */
605 		if (!decode_assist()) {
606 			/*
607 			 * Without decoding assistance, force the task of
608 			 * emulating the ins/outs on userspace.
609 			 */
610 			vmexit->exitcode = VM_EXITCODE_INST_EMUL;
611 			bzero(&vmexit->u.inst_emul,
612 			    sizeof (vmexit->u.inst_emul));
613 			return (UNHANDLED);
614 		}
615 
616 		/*
617 		 * Bits 7-9 encode the address size of ins/outs operations where
618 		 * the 1/2/4 values correspond to 16/32/64 bit sizes.
619 		 */
620 		inout->addrsize = 2 * ((info1 >> 7) & 0x7);
621 		VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
622 		    inout->addrsize == 8);
623 
624 		if (inout->flags & INOUT_IN) {
625 			/*
626 			 * For INS instructions, %es (encoded as 0) is the
627 			 * implied segment for the operation.
628 			 */
629 			inout->segment = 0;
630 		} else {
631 			/*
632 			 * Bits 10-12 encode the segment for OUTS.
633 			 * This value follows the standard x86 segment order.
634 			 */
635 			inout->segment = (info1 >> 10) & 0x7;
636 		}
637 	}
638 
639 	vmexit->exitcode = VM_EXITCODE_INOUT;
640 	svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);
641 	vie = vm_vie_ctx(svm_sc->vm, vcpu);
642 	vie_init_inout(vie, inout, vmexit->inst_length, &paging);
643 
644 	/* The in/out emulation will handle advancing %rip */
645 	vmexit->inst_length = 0;
646 
647 	return (UNHANDLED);
648 }
649 
650 static int
651 npf_fault_type(uint64_t exitinfo1)
652 {
653 
654 	if (exitinfo1 & VMCB_NPF_INFO1_W)
655 		return (PROT_WRITE);
656 	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
657 		return (PROT_EXEC);
658 	else
659 		return (PROT_READ);
660 }
661 
662 static bool
663 svm_npf_emul_fault(uint64_t exitinfo1)
664 {
665 	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
666 		return (false);
667 	}
668 
669 	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
670 		return (false);
671 	}
672 
673 	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
674 		return (false);
675 	}
676 
677 	return (true);
678 }
679 
680 static void
681 svm_handle_mmio_emul(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit,
682     uint64_t gpa)
683 {
684 	struct vmcb_ctrl *ctrl;
685 	struct vmcb *vmcb;
686 	struct vie *vie;
687 	struct vm_guest_paging paging;
688 	struct vmcb_segment *seg;
689 	char *inst_bytes = NULL;
690 	uint8_t inst_len = 0;
691 
692 	vmcb = svm_get_vmcb(svm_sc, vcpu);
693 	ctrl = &vmcb->ctrl;
694 
695 	vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
696 	vmexit->u.mmio_emul.gpa = gpa;
697 	vmexit->u.mmio_emul.gla = VIE_INVALID_GLA;
698 	svm_paging_info(vmcb, &paging);
699 
700 	switch (paging.cpu_mode) {
701 	case CPU_MODE_REAL:
702 		seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
703 		vmexit->u.mmio_emul.cs_base = seg->base;
704 		vmexit->u.mmio_emul.cs_d = 0;
705 		break;
706 	case CPU_MODE_PROTECTED:
707 	case CPU_MODE_COMPATIBILITY:
708 		seg = vmcb_segptr(vmcb, VM_REG_GUEST_CS);
709 		vmexit->u.mmio_emul.cs_base = seg->base;
710 
711 		/*
712 		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
713 		 */
714 		vmexit->u.mmio_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
715 		    1 : 0;
716 		break;
717 	default:
718 		vmexit->u.mmio_emul.cs_base = 0;
719 		vmexit->u.mmio_emul.cs_d = 0;
720 		break;
721 	}
722 
723 	/*
724 	 * Copy the instruction bytes into 'vie' if available.
725 	 */
726 	if (decode_assist() && !disable_npf_assist) {
727 		inst_len = ctrl->inst_len;
728 		inst_bytes = (char *)ctrl->inst_bytes;
729 	}
730 	vie = vm_vie_ctx(svm_sc->vm, vcpu);
731 	vie_init_mmio(vie, inst_bytes, inst_len, &paging, gpa);
732 }
733 
734 /*
735  * Do not allow CD, NW, or invalid high bits to be asserted in the value of cr0
736  * which is live in the guest.  They are visible via the shadow instead.
737  */
738 #define	SVM_CR0_MASK	~(CR0_CD | CR0_NW | 0xffffffff00000000)
739 
740 static void
741 svm_set_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t val, bool guest_write)
742 {
743 	struct vmcb_state *state;
744 	struct svm_regctx *regctx;
745 	uint64_t masked, old, diff;
746 
747 	state = svm_get_vmcb_state(svm_sc, vcpu);
748 	regctx = svm_get_guest_regctx(svm_sc, vcpu);
749 
750 	old = state->cr0 | (regctx->sctx_cr0_shadow & ~SVM_CR0_MASK);
751 	diff = old ^ val;
752 
753 	/* No further work needed if register contents remain the same */
754 	if (diff == 0) {
755 		return;
756 	}
757 
758 	/* Flush the TLB if the paging or write-protect bits are changing */
759 	if ((diff & CR0_PG) != 0 || (diff & CR0_WP) != 0) {
760 		flush_asid(svm_sc, vcpu);
761 	}
762 
763 	/*
764 	 * If the change in %cr0 is due to a guest action (via interception)
765 	 * then other CPU state updates may be required.
766 	 */
767 	if (guest_write) {
768 		if ((diff & CR0_PG) != 0) {
769 			uint64_t efer = state->efer;
770 
771 			/* Keep the long-mode state in EFER in sync */
772 			if ((val & CR0_PG) != 0 && (efer & EFER_LME) != 0) {
773 				state->efer |= EFER_LMA;
774 			}
775 			if ((val & CR0_PG) == 0 && (efer & EFER_LME) != 0) {
776 				state->efer &= ~EFER_LMA;
777 			}
778 		}
779 	}
780 
781 	masked = val & SVM_CR0_MASK;
782 	regctx->sctx_cr0_shadow = val;
783 	state->cr0 = masked;
784 	svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_CR);
785 
786 	if ((masked ^ val) != 0) {
787 		/*
788 		 * The guest has set bits in %cr0 which we are masking out and
789 		 * exposing via shadow.
790 		 *
791 		 * We must intercept %cr0 reads in order to make the shadowed
792 		 * view available to the guest.
793 		 *
794 		 * Writes to %cr0 must also be intercepted (unconditionally,
795 		 * unlike the VMCB_INTCPT_CR0_WRITE mechanism) so we can catch
796 		 * if/when the guest clears those shadowed bits.
797 		 */
798 		svm_enable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT,
799 		    BIT(0) | BIT(16));
800 	} else {
801 		/*
802 		 * When no bits remain in %cr0 which require shadowing, the
803 		 * unconditional intercept of reads/writes to %cr0 can be
804 		 * disabled.
805 		 *
806 		 * The selective write intercept (VMCB_INTCPT_CR0_WRITE) remains
807 		 * in place so we can be notified of operations which change
808 		 * bits other than TS or MP.
809 		 */
810 		svm_disable_intercept(svm_sc, vcpu, VMCB_CR_INTCPT,
811 		    BIT(0) | BIT(16));
812 	}
813 	svm_set_dirty(svm_sc, vcpu, VMCB_CACHE_I);
814 }
815 
816 static void
817 svm_get_cr0(struct svm_softc *svm_sc, int vcpu, uint64_t *val)
818 {
819 	struct vmcb *vmcb;
820 	struct svm_regctx *regctx;
821 
822 	vmcb = svm_get_vmcb(svm_sc, vcpu);
823 	regctx = svm_get_guest_regctx(svm_sc, vcpu);
824 
825 	/*
826 	 * Include the %cr0 bits which exist only in the shadow along with those
827 	 * in the running vCPU state.
828 	 */
829 	*val = vmcb->state.cr0 | (regctx->sctx_cr0_shadow & ~SVM_CR0_MASK);
830 }
831 
832 static void
833 svm_handle_cr0_read(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg)
834 {
835 	uint64_t val;
836 	int err __maybe_unused;
837 
838 	svm_get_cr0(svm_sc, vcpu, &val);
839 	err = svm_setreg(svm_sc, vcpu, reg, val);
840 	ASSERT(err == 0);
841 }
842 
843 static void
844 svm_handle_cr0_write(struct svm_softc *svm_sc, int vcpu, enum vm_reg_name reg)
845 {
846 	struct vmcb_state *state;
847 	uint64_t val;
848 	int err __maybe_unused;
849 
850 	state = svm_get_vmcb_state(svm_sc, vcpu);
851 
852 	err = svm_getreg(svm_sc, vcpu, reg, &val);
853 	ASSERT(err == 0);
854 
855 	if ((val & CR0_NW) != 0 && (val & CR0_CD) == 0) {
856 		/* NW without CD is nonsensical */
857 		vm_inject_gp(svm_sc->vm, vcpu);
858 		return;
859 	}
860 	if ((val & CR0_PG) != 0 && (val & CR0_PE) == 0) {
861 		/* PG requires PE */
862 		vm_inject_gp(svm_sc->vm, vcpu);
863 		return;
864 	}
865 	if ((state->cr0 & CR0_PG) == 0 && (val & CR0_PG) != 0) {
866 		/* When enabling paging, PAE must be enabled if LME is. */
867 		if ((state->efer & EFER_LME) != 0 &&
868 		    (state->cr4 & CR4_PAE) == 0) {
869 			vm_inject_gp(svm_sc->vm, vcpu);
870 			return;
871 		}
872 	}
873 
874 	svm_set_cr0(svm_sc, vcpu, val, true);
875 }
876 
877 static void
878 svm_inst_emul_other(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
879 {
880 	struct vie *vie;
881 	struct vm_guest_paging paging;
882 
883 	/* Let the instruction emulation (hopefully in-kernel) handle it */
884 	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
885 	bzero(&vmexit->u.inst_emul, sizeof (vmexit->u.inst_emul));
886 	vie = vm_vie_ctx(svm_sc->vm, vcpu);
887 	svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);
888 	vie_init_other(vie, &paging);
889 
890 	/* The instruction emulation will handle advancing %rip */
891 	vmexit->inst_length = 0;
892 }
893 
894 static void
895 svm_update_virqinfo(struct svm_softc *sc, int vcpu)
896 {
897 	struct vm *vm;
898 	struct vlapic *vlapic;
899 	struct vmcb_ctrl *ctrl;
900 
901 	vm = sc->vm;
902 	vlapic = vm_lapic(vm, vcpu);
903 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
904 
905 	/* Update %cr8 in the emulated vlapic */
906 	vlapic_set_cr8(vlapic, ctrl->v_tpr);
907 
908 	/* Virtual interrupt injection is not used. */
909 	KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
910 	    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
911 }
912 
913 CTASSERT(VMCB_EVENTINJ_TYPE_INTR	== VM_INTINFO_HWINTR);
914 CTASSERT(VMCB_EVENTINJ_TYPE_NMI		== VM_INTINFO_NMI);
915 CTASSERT(VMCB_EVENTINJ_TYPE_EXCEPTION	== VM_INTINFO_HWEXCP);
916 CTASSERT(VMCB_EVENTINJ_TYPE_INTn	== VM_INTINFO_SWINTR);
917 CTASSERT(VMCB_EVENTINJ_EC_VALID		== VM_INTINFO_DEL_ERRCODE);
918 CTASSERT(VMCB_EVENTINJ_VALID		== VM_INTINFO_VALID);
919 
920 static void
921 svm_save_exitintinfo(struct svm_softc *svm_sc, int vcpu)
922 {
923 	struct vmcb_ctrl *ctrl;
924 	uint64_t intinfo;
925 	int err;
926 
927 	ctrl  = svm_get_vmcb_ctrl(svm_sc, vcpu);
928 	intinfo = ctrl->exitintinfo;
929 	if (!VMCB_EXITINTINFO_VALID(intinfo))
930 		return;
931 
932 	/*
933 	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
934 	 *
935 	 * If a #VMEXIT happened during event delivery then record the event
936 	 * that was being delivered.
937 	 */
938 	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
939 	/*
940 	 * Relies on match between VMCB exitintinfo format and bhyve-generic
941 	 * format, which is ensured by CTASSERTs above.
942 	 */
943 	err = vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
944 	VERIFY0(err);
945 }
946 
947 static __inline int
948 vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
949 {
950 
951 	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
952 	    VMCB_INTCPT_VINTR));
953 }
954 
955 static void
956 svm_enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
957 {
958 	struct vmcb_ctrl *ctrl;
959 	struct vmcb_state *state;
960 
961 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
962 	state = svm_get_vmcb_state(sc, vcpu);
963 
964 	if ((ctrl->v_irq & V_IRQ) != 0 && ctrl->v_intr_vector == 0) {
965 		KASSERT(ctrl->v_intr_prio & V_IGN_TPR,
966 		    ("%s: invalid v_ign_tpr", __func__));
967 		KASSERT(vintr_intercept_enabled(sc, vcpu),
968 		    ("%s: vintr intercept should be enabled", __func__));
969 		return;
970 	}
971 
972 	/*
973 	 * We use V_IRQ in conjunction with the VINTR intercept to trap into the
974 	 * hypervisor as soon as a virtual interrupt can be delivered.
975 	 *
976 	 * Since injected events are not subject to intercept checks we need to
977 	 * ensure that the V_IRQ is not actually going to be delivered on VM
978 	 * entry.
979 	 */
980 	VERIFY((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
981 	    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow);
982 
983 	ctrl->v_irq |= V_IRQ;
984 	ctrl->v_intr_prio |= V_IGN_TPR;
985 	ctrl->v_intr_vector = 0;
986 	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
987 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
988 }
989 
990 static void
991 svm_disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
992 {
993 	struct vmcb_ctrl *ctrl;
994 
995 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
996 
997 	if ((ctrl->v_irq & V_IRQ) == 0 && ctrl->v_intr_vector == 0) {
998 		KASSERT(!vintr_intercept_enabled(sc, vcpu),
999 		    ("%s: vintr intercept should be disabled", __func__));
1000 		return;
1001 	}
1002 
1003 	ctrl->v_irq &= ~V_IRQ;
1004 	ctrl->v_intr_vector = 0;
1005 	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1006 	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1007 }
1008 
1009 /*
1010  * Once an NMI is injected it blocks delivery of further NMIs until the handler
1011  * executes an IRET. The IRET intercept is enabled when an NMI is injected to
1012  * to track when the vcpu is done handling the NMI.
1013  */
1014 static int
1015 svm_nmi_blocked(struct svm_softc *sc, int vcpu)
1016 {
1017 	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
1018 	    VMCB_INTCPT_IRET));
1019 }
1020 
1021 static void
1022 svm_clear_nmi_blocking(struct svm_softc *sc, int vcpu)
1023 {
1024 	struct vmcb_ctrl *ctrl;
1025 
1026 	KASSERT(svm_nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
1027 	/*
1028 	 * When the IRET intercept is cleared the vcpu will attempt to execute
1029 	 * the "iret" when it runs next. However, it is possible to inject
1030 	 * another NMI into the vcpu before the "iret" has actually executed.
1031 	 *
1032 	 * For e.g. if the "iret" encounters a #NPF when accessing the stack
1033 	 * it will trap back into the hypervisor. If an NMI is pending for
1034 	 * the vcpu it will be injected into the guest.
1035 	 *
1036 	 * XXX this needs to be fixed
1037 	 */
1038 	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1039 
1040 	/*
1041 	 * Set an interrupt shadow to prevent an NMI from being immediately
1042 	 * injected on the next VMRUN.
1043 	 */
1044 	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1045 	ctrl->intr_shadow = 1;
1046 }
1047 
1048 static void
1049 svm_inject_event(struct vmcb_ctrl *ctrl, uint64_t info)
1050 {
1051 	ASSERT(VM_INTINFO_PENDING(info));
1052 
1053 	uint8_t vector = VM_INTINFO_VECTOR(info);
1054 	uint32_t type = VM_INTINFO_TYPE(info);
1055 
1056 	/*
1057 	 * Correct behavior depends on bhyve intinfo event types lining up with
1058 	 * those defined by AMD for event injection in the VMCB.  The CTASSERTs
1059 	 * above svm_save_exitintinfo() ensure it.
1060 	 */
1061 	switch (type) {
1062 	case VM_INTINFO_NMI:
1063 		/* Ensure vector for injected event matches its type (NMI) */
1064 		vector = IDT_NMI;
1065 		break;
1066 	case VM_INTINFO_HWINTR:
1067 	case VM_INTINFO_SWINTR:
1068 		break;
1069 	case VM_INTINFO_HWEXCP:
1070 		if (vector == IDT_NMI) {
1071 			/*
1072 			 * NMIs are expected to be injected with
1073 			 * VMCB_EVENTINJ_TYPE_NMI, rather than as an exception
1074 			 * with the NMI vector.
1075 			 */
1076 			type = VM_INTINFO_NMI;
1077 		}
1078 		VERIFY(vector < 32);
1079 		break;
1080 	default:
1081 		/*
1082 		 * Since there is not strong validation for injected event types
1083 		 * at this point, fall back to software interrupt for those we
1084 		 * do not recognized.
1085 		 */
1086 		type = VM_INTINFO_SWINTR;
1087 		break;
1088 	}
1089 
1090 	ctrl->eventinj = VMCB_EVENTINJ_VALID | type | vector;
1091 	if (VM_INTINFO_HAS_ERRCODE(info)) {
1092 		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
1093 		ctrl->eventinj |= (uint64_t)VM_INTINFO_ERRCODE(info) << 32;
1094 	}
1095 }
1096 
1097 static void
1098 svm_inject_nmi(struct svm_softc *sc, int vcpu)
1099 {
1100 	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1101 
1102 	ASSERT(!svm_nmi_blocked(sc, vcpu));
1103 
1104 	ctrl->eventinj = VMCB_EVENTINJ_VALID | VMCB_EVENTINJ_TYPE_NMI;
1105 	vm_nmi_clear(sc->vm, vcpu);
1106 
1107 	/*
1108 	 * Virtual NMI blocking is now in effect.
1109 	 *
1110 	 * Not only does this block a subsequent NMI injection from taking
1111 	 * place, it also configures an intercept on the IRET so we can track
1112 	 * when the next injection can take place.
1113 	 */
1114 	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1115 }
1116 
1117 static void
1118 svm_inject_irq(struct svm_softc *sc, int vcpu, int vector)
1119 {
1120 	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1121 
1122 	ASSERT(vector >= 0 && vector <= 255);
1123 
1124 	ctrl->eventinj = VMCB_EVENTINJ_VALID | vector;
1125 }
1126 
1127 #define	EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL
1128 
1129 static vm_msr_result_t
1130 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval)
1131 {
1132 	struct vmcb_state *state = svm_get_vmcb_state(sc, vcpu);
1133 	uint64_t lma;
1134 	int error;
1135 
1136 	newval &= ~0xFE;		/* clear the Read-As-Zero (RAZ) bits */
1137 
1138 	if (newval & EFER_MBZ_BITS) {
1139 		return (VMR_GP);
1140 	}
1141 
1142 	/* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
1143 	const uint64_t changed = state->efer ^ newval;
1144 	if (changed & EFER_LME) {
1145 		if (state->cr0 & CR0_PG) {
1146 			return (VMR_GP);
1147 		}
1148 	}
1149 
1150 	/* EFER.LMA = EFER.LME & CR0.PG */
1151 	if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) {
1152 		lma = EFER_LMA;
1153 	} else {
1154 		lma = 0;
1155 	}
1156 	if ((newval & EFER_LMA) != lma) {
1157 		return (VMR_GP);
1158 	}
1159 
1160 	if ((newval & EFER_NXE) != 0 &&
1161 	    !vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) {
1162 		return (VMR_GP);
1163 	}
1164 	if ((newval & EFER_FFXSR) != 0 &&
1165 	    !vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) {
1166 		return (VMR_GP);
1167 	}
1168 	if ((newval & EFER_TCE) != 0 &&
1169 	    !vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) {
1170 		return (VMR_GP);
1171 	}
1172 
1173 	/*
1174 	 * Until bhyve has proper support for long-mode segment limits, just
1175 	 * toss a #GP at the guest if they attempt to use it.
1176 	 */
1177 	if (newval & EFER_LMSLE) {
1178 		return (VMR_GP);
1179 	}
1180 
1181 	error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
1182 	VERIFY0(error);
1183 	return (VMR_OK);
1184 }
1185 
1186 static int
1187 svm_handle_msr(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit,
1188     bool is_wrmsr)
1189 {
1190 	struct vmcb_state *state = svm_get_vmcb_state(svm_sc, vcpu);
1191 	struct svm_regctx *ctx = svm_get_guest_regctx(svm_sc, vcpu);
1192 	const uint32_t ecx = ctx->sctx_rcx;
1193 	vm_msr_result_t res;
1194 	uint64_t val = 0;
1195 
1196 	if (is_wrmsr) {
1197 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
1198 		val = ctx->sctx_rdx << 32 | (uint32_t)state->rax;
1199 
1200 		if (vlapic_owned_msr(ecx)) {
1201 			struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu);
1202 
1203 			res = vlapic_wrmsr(vlapic, ecx, val);
1204 		} else if (ecx == MSR_EFER) {
1205 			res = svm_write_efer(svm_sc, vcpu, val);
1206 		} else {
1207 			res = svm_wrmsr(svm_sc, vcpu, ecx, val);
1208 		}
1209 	} else {
1210 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
1211 
1212 		if (vlapic_owned_msr(ecx)) {
1213 			struct vlapic *vlapic = vm_lapic(svm_sc->vm, vcpu);
1214 
1215 			res = vlapic_rdmsr(vlapic, ecx, &val);
1216 		} else {
1217 			res = svm_rdmsr(svm_sc, vcpu, ecx, &val);
1218 		}
1219 	}
1220 
1221 	switch (res) {
1222 	case VMR_OK:
1223 		/* Store rdmsr result in the appropriate registers */
1224 		if (!is_wrmsr) {
1225 			state->rax = (uint32_t)val;
1226 			ctx->sctx_rdx = val >> 32;
1227 		}
1228 		return (1);
1229 	case VMR_GP:
1230 		vm_inject_gp(svm_sc->vm, vcpu);
1231 		return (1);
1232 	case VMR_UNHANLDED:
1233 		vmexit->exitcode = is_wrmsr ?
1234 		    VM_EXITCODE_WRMSR : VM_EXITCODE_RDMSR;
1235 		vmexit->u.msr.code = ecx;
1236 		vmexit->u.msr.wval = val;
1237 		return (0);
1238 	default:
1239 		panic("unexpected msr result %u\n", res);
1240 	}
1241 }
1242 
1243 /*
1244  * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
1245  * that are due to instruction intercepts as well as MSR and IOIO intercepts
1246  * and exceptions caused by INT3, INTO and BOUND instructions.
1247  *
1248  * Return 1 if the nRIP is valid and 0 otherwise.
1249  */
1250 static int
1251 nrip_valid(uint64_t exitcode)
1252 {
1253 	switch (exitcode) {
1254 	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
1255 	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
1256 	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
1257 	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
1258 	case 0x43:		/* INT3 */
1259 	case 0x44:		/* INTO */
1260 	case 0x45:		/* BOUND */
1261 	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
1262 	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
1263 		return (1);
1264 	default:
1265 		return (0);
1266 	}
1267 }
1268 
1269 static int
1270 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
1271 {
1272 	struct vmcb *vmcb;
1273 	struct vmcb_state *state;
1274 	struct vmcb_ctrl *ctrl;
1275 	struct svm_regctx *ctx;
1276 	uint64_t code, info1, info2;
1277 	int handled;
1278 
1279 	ctx = svm_get_guest_regctx(svm_sc, vcpu);
1280 	vmcb = svm_get_vmcb(svm_sc, vcpu);
1281 	state = &vmcb->state;
1282 	ctrl = &vmcb->ctrl;
1283 
1284 	handled = 0;
1285 	code = ctrl->exitcode;
1286 	info1 = ctrl->exitinfo1;
1287 	info2 = ctrl->exitinfo2;
1288 
1289 	vmexit->exitcode = VM_EXITCODE_BOGUS;
1290 	vmexit->rip = state->rip;
1291 	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
1292 
1293 	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
1294 
1295 	/*
1296 	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
1297 	 * in an inconsistent state and can trigger assertions that would
1298 	 * never happen otherwise.
1299 	 */
1300 	if (code == VMCB_EXIT_INVALID) {
1301 		vm_exit_svm(vmexit, code, info1, info2);
1302 		return (0);
1303 	}
1304 
1305 	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
1306 	    "injection valid bit is set %lx", __func__, ctrl->eventinj));
1307 
1308 	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
1309 	    ("invalid inst_length %d: code (%lx), info1 (%lx), info2 (%lx)",
1310 	    vmexit->inst_length, code, info1, info2));
1311 
1312 	svm_update_virqinfo(svm_sc, vcpu);
1313 	svm_save_exitintinfo(svm_sc, vcpu);
1314 
1315 	switch (code) {
1316 	case VMCB_EXIT_CR0_READ:
1317 		if (VMCB_CRx_INFO1_VALID(info1) != 0) {
1318 			svm_handle_cr0_read(svm_sc, vcpu,
1319 			    vie_regnum_map(VMCB_CRx_INFO1_GPR(info1)));
1320 			handled = 1;
1321 		} else {
1322 			/*
1323 			 * If SMSW is used to read the contents of %cr0, then
1324 			 * the VALID bit will not be set in `info1`, since the
1325 			 * handling is different from the mov-to-reg case.
1326 			 *
1327 			 * Punt to the instruction emulation to handle it.
1328 			 */
1329 			svm_inst_emul_other(svm_sc, vcpu, vmexit);
1330 		}
1331 		break;
1332 	case VMCB_EXIT_CR0_WRITE:
1333 	case VMCB_EXIT_CR0_SEL_WRITE:
1334 		if (VMCB_CRx_INFO1_VALID(info1) != 0) {
1335 			svm_handle_cr0_write(svm_sc, vcpu,
1336 			    vie_regnum_map(VMCB_CRx_INFO1_GPR(info1)));
1337 			handled = 1;
1338 		} else {
1339 			/*
1340 			 * Writes to %cr0 without VALID being set in `info1` are
1341 			 * initiated by the LMSW and CLTS instructions.  While
1342 			 * LMSW (like SMSW) sees little use in modern OSes and
1343 			 * bootloaders, CLTS is still used for handling FPU
1344 			 * state transitions.
1345 			 *
1346 			 * Punt to the instruction emulation to handle them.
1347 			 */
1348 			svm_inst_emul_other(svm_sc, vcpu, vmexit);
1349 		}
1350 		break;
1351 	case VMCB_EXIT_IRET:
1352 		/*
1353 		 * Restart execution at "iret" but with the intercept cleared.
1354 		 */
1355 		vmexit->inst_length = 0;
1356 		svm_clear_nmi_blocking(svm_sc, vcpu);
1357 		handled = 1;
1358 		break;
1359 	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
1360 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
1361 		svm_disable_intr_window_exiting(svm_sc, vcpu);
1362 		handled = 1;
1363 		break;
1364 	case VMCB_EXIT_INTR:	/* external interrupt */
1365 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
1366 		handled = 1;
1367 		break;
1368 	case VMCB_EXIT_NMI:
1369 	case VMCB_EXIT_SMI:
1370 	case VMCB_EXIT_INIT:
1371 		/*
1372 		 * For external NMI/SMI and physical INIT interrupts, simply
1373 		 * continue execution, as those host events will be handled by
1374 		 * the physical CPU.
1375 		 */
1376 		handled = 1;
1377 		break;
1378 	case VMCB_EXIT_EXCP0 ... VMCB_EXIT_EXCP31: {
1379 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
1380 
1381 		const uint8_t idtvec = code - VMCB_EXIT_EXCP0;
1382 		uint32_t errcode = 0;
1383 		bool reflect = true;
1384 		bool errcode_valid = false;
1385 
1386 		switch (idtvec) {
1387 		case IDT_MC:
1388 			/* The host will handle the MCE itself. */
1389 			reflect = false;
1390 			vmm_call_trap(T_MCE);
1391 			break;
1392 		case IDT_PF:
1393 			VERIFY0(svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
1394 			    info2));
1395 			/* fallthru */
1396 		case IDT_NP:
1397 		case IDT_SS:
1398 		case IDT_GP:
1399 		case IDT_AC:
1400 		case IDT_TS:
1401 			errcode_valid = true;
1402 			errcode = info1;
1403 			break;
1404 
1405 		case IDT_DF:
1406 			errcode_valid = true;
1407 			break;
1408 
1409 		case IDT_BP:
1410 		case IDT_OF:
1411 		case IDT_BR:
1412 			/*
1413 			 * The 'nrip' field is populated for INT3, INTO and
1414 			 * BOUND exceptions and this also implies that
1415 			 * 'inst_length' is non-zero.
1416 			 *
1417 			 * Reset 'inst_length' to zero so the guest %rip at
1418 			 * event injection is identical to what it was when
1419 			 * the exception originally happened.
1420 			 */
1421 			vmexit->inst_length = 0;
1422 			/* fallthru */
1423 		default:
1424 			errcode_valid = false;
1425 			break;
1426 		}
1427 		VERIFY0(vmexit->inst_length);
1428 
1429 		if (reflect) {
1430 			/* Reflect the exception back into the guest */
1431 			VERIFY0(vm_inject_exception(svm_sc->vm, vcpu, idtvec,
1432 			    errcode_valid, errcode, false));
1433 		}
1434 		handled = 1;
1435 		break;
1436 		}
1437 	case VMCB_EXIT_MSR:
1438 		handled = svm_handle_msr(svm_sc, vcpu, vmexit, info1 != 0);
1439 		break;
1440 	case VMCB_EXIT_IO:
1441 		handled = svm_handle_inout(svm_sc, vcpu, vmexit);
1442 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
1443 		break;
1444 	case VMCB_EXIT_SHUTDOWN:
1445 		(void) vm_suspend(svm_sc->vm, VM_SUSPEND_TRIPLEFAULT);
1446 		handled = 1;
1447 		break;
1448 	case VMCB_EXIT_INVD:
1449 	case VMCB_EXIT_INVLPGA:
1450 		/* privileged invalidation instructions */
1451 		vm_inject_ud(svm_sc->vm, vcpu);
1452 		handled = 1;
1453 		break;
1454 	case VMCB_EXIT_VMRUN:
1455 	case VMCB_EXIT_VMLOAD:
1456 	case VMCB_EXIT_VMSAVE:
1457 	case VMCB_EXIT_STGI:
1458 	case VMCB_EXIT_CLGI:
1459 	case VMCB_EXIT_SKINIT:
1460 		/* privileged vmm instructions */
1461 		vm_inject_ud(svm_sc->vm, vcpu);
1462 		handled = 1;
1463 		break;
1464 	case VMCB_EXIT_VMMCALL:
1465 		/* No handlers make use of VMMCALL for now */
1466 		vm_inject_ud(svm_sc->vm, vcpu);
1467 		handled = 1;
1468 		break;
1469 	case VMCB_EXIT_CPUID:
1470 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
1471 		vcpu_emulate_cpuid(svm_sc->vm, vcpu, &state->rax,
1472 		    &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
1473 		handled = 1;
1474 		break;
1475 	case VMCB_EXIT_HLT:
1476 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
1477 		vmexit->exitcode = VM_EXITCODE_HLT;
1478 		vmexit->u.hlt.rflags = state->rflags;
1479 		break;
1480 	case VMCB_EXIT_PAUSE:
1481 		vmexit->exitcode = VM_EXITCODE_PAUSE;
1482 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
1483 		break;
1484 	case VMCB_EXIT_NPF:
1485 		/* EXITINFO2 contains the faulting guest physical address */
1486 		if (info1 & VMCB_NPF_INFO1_RSV) {
1487 			/* nested fault with reserved bits set */
1488 		} else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
1489 			vmexit->exitcode = VM_EXITCODE_PAGING;
1490 			vmexit->u.paging.gpa = info2;
1491 			vmexit->u.paging.fault_type = npf_fault_type(info1);
1492 			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
1493 		} else if (svm_npf_emul_fault(info1)) {
1494 			svm_handle_mmio_emul(svm_sc, vcpu, vmexit, info2);
1495 			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
1496 		}
1497 		break;
1498 	case VMCB_EXIT_MONITOR:
1499 		vmexit->exitcode = VM_EXITCODE_MONITOR;
1500 		break;
1501 	case VMCB_EXIT_MWAIT:
1502 		vmexit->exitcode = VM_EXITCODE_MWAIT;
1503 		break;
1504 	default:
1505 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
1506 		break;
1507 	}
1508 
1509 	DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, vmexit->rip, uint32_t,
1510 	    code);
1511 
1512 	if (handled) {
1513 		vmexit->rip += vmexit->inst_length;
1514 		vmexit->inst_length = 0;
1515 		state->rip = vmexit->rip;
1516 	} else {
1517 		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1518 			/*
1519 			 * If this VM exit was not claimed by anybody then
1520 			 * treat it as a generic SVM exit.
1521 			 */
1522 			vm_exit_svm(vmexit, code, info1, info2);
1523 		} else {
1524 			/*
1525 			 * The exitcode and collateral have been populated.
1526 			 * The VM exit will be processed further in userland.
1527 			 */
1528 		}
1529 	}
1530 	return (handled);
1531 }
1532 
1533 /*
1534  * Inject exceptions, NMIs, and ExtINTs.
1535  *
1536  * The logic behind these are complicated and may involve mutex contention, so
1537  * the injection is performed without the protection of host CPU interrupts
1538  * being disabled.  This means a racing notification could be "lost",
1539  * necessitating a later call to svm_inject_recheck() to close that window
1540  * of opportunity.
1541  */
1542 static enum event_inject_state
1543 svm_inject_events(struct svm_softc *sc, int vcpu)
1544 {
1545 	struct vmcb_ctrl *ctrl;
1546 	struct vmcb_state *state;
1547 	struct svm_vcpu *vcpustate;
1548 	uint64_t intinfo;
1549 	enum event_inject_state ev_state;
1550 
1551 	state = svm_get_vmcb_state(sc, vcpu);
1552 	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);
1553 	vcpustate = svm_get_vcpu(sc, vcpu);
1554 	ev_state = EIS_CAN_INJECT;
1555 
1556 	/* Clear any interrupt shadow if guest %rip has changed */
1557 	if (vcpustate->nextrip != state->rip) {
1558 		ctrl->intr_shadow = 0;
1559 	}
1560 
1561 	/*
1562 	 * An event is already pending for injection.  This can occur when the
1563 	 * vCPU exits prior to VM entry (like for an AST).
1564 	 */
1565 	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1566 		return (EIS_EV_EXISTING | EIS_REQ_EXIT);
1567 	}
1568 
1569 	/*
1570 	 * Inject pending events or exceptions for this vcpu.
1571 	 *
1572 	 * An event might be pending because the previous #VMEXIT happened
1573 	 * during event delivery (i.e. ctrl->exitintinfo).
1574 	 *
1575 	 * An event might also be pending because an exception was injected
1576 	 * by the hypervisor (e.g. #PF during instruction emulation).
1577 	 */
1578 	if (vm_entry_intinfo(sc->vm, vcpu, &intinfo)) {
1579 		svm_inject_event(ctrl, intinfo);
1580 		vmm_stat_incr(sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
1581 		ev_state = EIS_EV_INJECTED;
1582 	}
1583 
1584 	/* NMI event has priority over interrupts. */
1585 	if (vm_nmi_pending(sc->vm, vcpu) && !svm_nmi_blocked(sc, vcpu)) {
1586 		if (ev_state == EIS_CAN_INJECT) {
1587 			/* Can't inject NMI if vcpu is in an intr_shadow. */
1588 			if (ctrl->intr_shadow) {
1589 				return (EIS_GI_BLOCK);
1590 			}
1591 
1592 			svm_inject_nmi(sc, vcpu);
1593 			ev_state = EIS_EV_INJECTED;
1594 		} else {
1595 			return (ev_state | EIS_REQ_EXIT);
1596 		}
1597 	}
1598 
1599 	if (vm_extint_pending(sc->vm, vcpu)) {
1600 		int vector;
1601 
1602 		if (ev_state != EIS_CAN_INJECT) {
1603 			return (ev_state | EIS_REQ_EXIT);
1604 		}
1605 
1606 		/*
1607 		 * If the guest has disabled interrupts or is in an interrupt
1608 		 * shadow then we cannot inject the pending interrupt.
1609 		 */
1610 		if ((state->rflags & PSL_I) == 0 || ctrl->intr_shadow) {
1611 			return (EIS_GI_BLOCK);
1612 		}
1613 
1614 		/* Ask the legacy pic for a vector to inject */
1615 		vatpic_pending_intr(sc->vm, &vector);
1616 		KASSERT(vector >= 0 && vector <= 255,
1617 		    ("invalid vector %d from INTR", vector));
1618 
1619 		svm_inject_irq(sc, vcpu, vector);
1620 		vm_extint_clear(sc->vm, vcpu);
1621 		vatpic_intr_accepted(sc->vm, vector);
1622 		ev_state = EIS_EV_INJECTED;
1623 	}
1624 
1625 	return (ev_state);
1626 }
1627 
1628 /*
1629  * Synchronize vLAPIC state and inject any interrupts pending on it.
1630  *
1631  * This is done with host CPU interrupts disabled so notification IPIs will be
1632  * queued on the host APIC and recognized when entering SVM guest context.
1633  */
1634 static enum event_inject_state
1635 svm_inject_vlapic(struct svm_softc *sc, int vcpu, struct vlapic *vlapic,
1636     enum event_inject_state ev_state)
1637 {
1638 	struct vmcb_ctrl *ctrl;
1639 	struct vmcb_state *state;
1640 	int vector;
1641 	uint8_t v_tpr;
1642 
1643 	state = svm_get_vmcb_state(sc, vcpu);
1644 	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);
1645 
1646 	/*
1647 	 * The guest can modify the TPR by writing to %cr8. In guest mode the
1648 	 * CPU reflects this write to V_TPR without hypervisor intervention.
1649 	 *
1650 	 * The guest can also modify the TPR by writing to it via the memory
1651 	 * mapped APIC page. In this case, the write will be emulated by the
1652 	 * hypervisor. For this reason V_TPR must be updated before every
1653 	 * VMRUN.
1654 	 */
1655 	v_tpr = vlapic_get_cr8(vlapic);
1656 	KASSERT(v_tpr <= 15, ("invalid v_tpr %x", v_tpr));
1657 	if (ctrl->v_tpr != v_tpr) {
1658 		ctrl->v_tpr = v_tpr;
1659 		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1660 	}
1661 
1662 	/* If an event cannot otherwise be injected, we are done for now */
1663 	if (ev_state != EIS_CAN_INJECT) {
1664 		return (ev_state);
1665 	}
1666 
1667 	if (!vlapic_pending_intr(vlapic, &vector)) {
1668 		return (EIS_CAN_INJECT);
1669 	}
1670 	KASSERT(vector >= 16 && vector <= 255,
1671 	    ("invalid vector %d from local APIC", vector));
1672 
1673 	/*
1674 	 * If the guest has disabled interrupts or is in an interrupt shadow
1675 	 * then we cannot inject the pending interrupt.
1676 	 */
1677 	if ((state->rflags & PSL_I) == 0 || ctrl->intr_shadow) {
1678 		return (EIS_GI_BLOCK);
1679 	}
1680 
1681 	svm_inject_irq(sc, vcpu, vector);
1682 	vlapic_intr_accepted(vlapic, vector);
1683 	return (EIS_EV_INJECTED);
1684 }
1685 
1686 /*
1687  * Re-check for events to be injected.
1688  *
1689  * Once host CPU interrupts are disabled, check for the presence of any events
1690  * which require injection processing.  If an exit is required upon injection,
1691  * or once the guest becomes interruptable, that will be configured too.
1692  */
1693 static bool
1694 svm_inject_recheck(struct svm_softc *sc, int vcpu,
1695     enum event_inject_state ev_state)
1696 {
1697 	struct vmcb_ctrl *ctrl;
1698 
1699 	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);
1700 
1701 	if (ev_state == EIS_CAN_INJECT) {
1702 		/*
1703 		 * An active interrupt shadow would preclude us from injecting
1704 		 * any events picked up during a re-check.
1705 		 */
1706 		if (ctrl->intr_shadow != 0) {
1707 			return (false);
1708 		}
1709 
1710 		if (vm_nmi_pending(sc->vm, vcpu) &&
1711 		    !svm_nmi_blocked(sc, vcpu)) {
1712 			/* queued NMI not blocked by NMI-window-exiting */
1713 			return (true);
1714 		}
1715 		if (vm_extint_pending(sc->vm, vcpu)) {
1716 			/* queued ExtINT not blocked by existing injection */
1717 			return (true);
1718 		}
1719 	} else {
1720 		if ((ev_state & EIS_REQ_EXIT) != 0) {
1721 			/*
1722 			 * Use a self-IPI to force an immediate exit after
1723 			 * event injection has occurred.
1724 			 */
1725 			poke_cpu(CPU->cpu_id);
1726 		} else {
1727 			/*
1728 			 * If any event is being injected, an exit immediately
1729 			 * upon becoming interruptable again will allow pending
1730 			 * or newly queued events to be injected in a timely
1731 			 * manner.
1732 			 */
1733 			svm_enable_intr_window_exiting(sc, vcpu);
1734 		}
1735 	}
1736 	return (false);
1737 }
1738 
1739 
1740 static void
1741 check_asid(struct svm_softc *sc, int vcpuid, uint_t thiscpu, uint64_t nptgen)
1742 {
1743 	struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid);
1744 	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
1745 	uint8_t flush;
1746 
1747 	flush = hma_svm_asid_update(&vcpustate->hma_asid, flush_by_asid(),
1748 	    vcpustate->nptgen != nptgen);
1749 
1750 	if (flush != VMCB_TLB_FLUSH_NOTHING) {
1751 		ctrl->asid = vcpustate->hma_asid.hsa_asid;
1752 		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
1753 	}
1754 	ctrl->tlb_ctrl = flush;
1755 	vcpustate->nptgen = nptgen;
1756 }
1757 
1758 static void
1759 flush_asid(struct svm_softc *sc, int vcpuid)
1760 {
1761 	struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpuid);
1762 	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
1763 	uint8_t flush;
1764 
1765 	flush = hma_svm_asid_update(&vcpustate->hma_asid, flush_by_asid(),
1766 	    true);
1767 
1768 	ASSERT(flush != VMCB_TLB_FLUSH_NOTHING);
1769 	ctrl->asid = vcpustate->hma_asid.hsa_asid;
1770 	ctrl->tlb_ctrl = flush;
1771 	svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
1772 	/*
1773 	 * A potential future optimization: We could choose to update the nptgen
1774 	 * associated with the vCPU, since any pending nptgen change requiring a
1775 	 * flush will be satisfied by the one which has just now been queued.
1776 	 */
1777 }
1778 
1779 static __inline void
1780 disable_gintr(void)
1781 {
1782 	__asm __volatile("clgi");
1783 }
1784 
1785 static __inline void
1786 enable_gintr(void)
1787 {
1788 	__asm __volatile("stgi");
1789 }
1790 
1791 static __inline void
1792 svm_dr_enter_guest(struct svm_regctx *gctx)
1793 {
1794 
1795 	/* Save host control debug registers. */
1796 	gctx->host_dr7 = rdr7();
1797 	gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
1798 
1799 	/*
1800 	 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
1801 	 * exceptions in the host based on the guest DRx values.  The
1802 	 * guest DR6, DR7, and DEBUGCTL are saved/restored in the
1803 	 * VMCB.
1804 	 */
1805 	load_dr7(0);
1806 	wrmsr(MSR_DEBUGCTLMSR, 0);
1807 
1808 	/* Save host debug registers. */
1809 	gctx->host_dr0 = rdr0();
1810 	gctx->host_dr1 = rdr1();
1811 	gctx->host_dr2 = rdr2();
1812 	gctx->host_dr3 = rdr3();
1813 	gctx->host_dr6 = rdr6();
1814 
1815 	/* Restore guest debug registers. */
1816 	load_dr0(gctx->sctx_dr0);
1817 	load_dr1(gctx->sctx_dr1);
1818 	load_dr2(gctx->sctx_dr2);
1819 	load_dr3(gctx->sctx_dr3);
1820 }
1821 
1822 static __inline void
1823 svm_dr_leave_guest(struct svm_regctx *gctx)
1824 {
1825 
1826 	/* Save guest debug registers. */
1827 	gctx->sctx_dr0 = rdr0();
1828 	gctx->sctx_dr1 = rdr1();
1829 	gctx->sctx_dr2 = rdr2();
1830 	gctx->sctx_dr3 = rdr3();
1831 
1832 	/*
1833 	 * Restore host debug registers.  Restore DR7 and DEBUGCTL
1834 	 * last.
1835 	 */
1836 	load_dr0(gctx->host_dr0);
1837 	load_dr1(gctx->host_dr1);
1838 	load_dr2(gctx->host_dr2);
1839 	load_dr3(gctx->host_dr3);
1840 	load_dr6(gctx->host_dr6);
1841 	wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
1842 	load_dr7(gctx->host_dr7);
1843 }
1844 
1845 static void
1846 svm_apply_tsc_adjust(struct svm_softc *svm_sc, int vcpuid)
1847 {
1848 	const uint64_t offset = vcpu_tsc_offset(svm_sc->vm, vcpuid, true);
1849 	struct vmcb_ctrl *ctrl = svm_get_vmcb_ctrl(svm_sc, vcpuid);
1850 
1851 	if (ctrl->tsc_offset != offset) {
1852 		ctrl->tsc_offset = offset;
1853 		svm_set_dirty(svm_sc, vcpuid, VMCB_CACHE_I);
1854 	}
1855 }
1856 
1857 
1858 /*
1859  * Start vcpu with specified RIP.
1860  */
1861 static int
1862 svm_vmrun(void *arg, int vcpu, uint64_t rip)
1863 {
1864 	struct svm_regctx *gctx;
1865 	struct svm_softc *svm_sc;
1866 	struct svm_vcpu *vcpustate;
1867 	struct vmcb_state *state;
1868 	struct vmcb_ctrl *ctrl;
1869 	struct vm_exit *vmexit;
1870 	struct vlapic *vlapic;
1871 	vm_client_t *vmc;
1872 	struct vm *vm;
1873 	uint64_t vmcb_pa;
1874 	int handled;
1875 	uint16_t ldt_sel;
1876 
1877 	svm_sc = arg;
1878 	vm = svm_sc->vm;
1879 
1880 	vcpustate = svm_get_vcpu(svm_sc, vcpu);
1881 	state = svm_get_vmcb_state(svm_sc, vcpu);
1882 	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
1883 	vmexit = vm_exitinfo(vm, vcpu);
1884 	vlapic = vm_lapic(vm, vcpu);
1885 	vmc = vm_get_vmclient(vm, vcpu);
1886 
1887 	gctx = svm_get_guest_regctx(svm_sc, vcpu);
1888 	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
1889 
1890 	if (vcpustate->lastcpu != curcpu) {
1891 		/*
1892 		 * Force new ASID allocation by invalidating the generation.
1893 		 */
1894 		vcpustate->hma_asid.hsa_gen = 0;
1895 
1896 		/*
1897 		 * Invalidate the VMCB state cache by marking all fields dirty.
1898 		 */
1899 		svm_set_dirty(svm_sc, vcpu, 0xffffffff);
1900 
1901 		/*
1902 		 * XXX
1903 		 * Setting 'vcpustate->lastcpu' here is bit premature because
1904 		 * we may return from this function without actually executing
1905 		 * the VMRUN  instruction. This could happen if an AST or yield
1906 		 * condition is pending on the first time through the loop.
1907 		 *
1908 		 * This works for now but any new side-effects of vcpu
1909 		 * migration should take this case into account.
1910 		 */
1911 		vcpustate->lastcpu = curcpu;
1912 		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
1913 	}
1914 
1915 	svm_apply_tsc_adjust(svm_sc, vcpu);
1916 
1917 	svm_msr_guest_enter(svm_sc, vcpu);
1918 
1919 	VERIFY(!vcpustate->loaded && curthread->t_preempt != 0);
1920 	vcpustate->loaded = B_TRUE;
1921 
1922 	/* Update Guest RIP */
1923 	state->rip = rip;
1924 
1925 	do {
1926 		enum event_inject_state inject_state;
1927 		uint64_t nptgen;
1928 
1929 		/*
1930 		 * Initial event injection is complex and may involve mutex
1931 		 * contention, so it must be performed with global interrupts
1932 		 * still enabled.
1933 		 */
1934 		inject_state = svm_inject_events(svm_sc, vcpu);
1935 		handled = 0;
1936 
1937 		/*
1938 		 * Disable global interrupts to guarantee atomicity during
1939 		 * loading of guest state. This includes not only the state
1940 		 * loaded by the "vmrun" instruction but also software state
1941 		 * maintained by the hypervisor: suspended and rendezvous
1942 		 * state, NPT generation number, vlapic interrupts etc.
1943 		 */
1944 		disable_gintr();
1945 
1946 		/*
1947 		 * Synchronizing and injecting vlapic state is lock-free and is
1948 		 * safe (and prudent) to perform with interrupts disabled.
1949 		 */
1950 		inject_state = svm_inject_vlapic(svm_sc, vcpu, vlapic,
1951 		    inject_state);
1952 
1953 		/*
1954 		 * Check for vCPU bail-out conditions.  This must be done after
1955 		 * svm_inject_events() to detect a triple-fault condition.
1956 		 */
1957 		if (vcpu_entry_bailout_checks(vm, vcpu, state->rip)) {
1958 			enable_gintr();
1959 			break;
1960 		}
1961 
1962 		if (vcpu_run_state_pending(vm, vcpu)) {
1963 			enable_gintr();
1964 			vm_exit_run_state(vm, vcpu, state->rip);
1965 			break;
1966 		}
1967 
1968 		/*
1969 		 * If subsequent activity queued events which require injection
1970 		 * handling, take another lap to handle them.
1971 		 */
1972 		if (svm_inject_recheck(svm_sc, vcpu, inject_state)) {
1973 			enable_gintr();
1974 			handled = 1;
1975 			continue;
1976 		}
1977 
1978 		/*
1979 		 * #VMEXIT resumes the host with the guest LDTR, so
1980 		 * save the current LDT selector so it can be restored
1981 		 * after an exit.  The userspace hypervisor probably
1982 		 * doesn't use a LDT, but save and restore it to be
1983 		 * safe.
1984 		 */
1985 		ldt_sel = sldt();
1986 
1987 		/*
1988 		 * Check the vmspace and ASID generations to ensure that the
1989 		 * vcpu does not use stale TLB mappings.
1990 		 */
1991 		nptgen = vmc_table_enter(vmc);
1992 		check_asid(svm_sc, vcpu, curcpu, nptgen);
1993 
1994 		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
1995 		vcpustate->dirty = 0;
1996 
1997 		/* Launch Virtual Machine. */
1998 		vcpu_ustate_change(vm, vcpu, VU_RUN);
1999 		svm_dr_enter_guest(gctx);
2000 		svm_launch(vmcb_pa, gctx, get_pcpu());
2001 		svm_dr_leave_guest(gctx);
2002 		vcpu_ustate_change(vm, vcpu, VU_EMU_KERN);
2003 
2004 		/* Restore host LDTR. */
2005 		lldt(ldt_sel);
2006 
2007 		/* #VMEXIT disables interrupts so re-enable them here. */
2008 		enable_gintr();
2009 
2010 		vmc_table_exit(vmc);
2011 
2012 		/* Update 'nextrip' */
2013 		vcpustate->nextrip = state->rip;
2014 
2015 		/* Handle #VMEXIT and if required return to user space. */
2016 		handled = svm_vmexit(svm_sc, vcpu, vmexit);
2017 	} while (handled);
2018 
2019 	svm_msr_guest_exit(svm_sc, vcpu);
2020 
2021 	VERIFY(vcpustate->loaded && curthread->t_preempt != 0);
2022 	vcpustate->loaded = B_FALSE;
2023 
2024 	return (0);
2025 }
2026 
2027 static void
2028 svm_vmcleanup(void *arg)
2029 {
2030 	struct svm_softc *sc = arg;
2031 
2032 	vmm_contig_free(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE);
2033 	vmm_contig_free(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE);
2034 	kmem_free(sc, sizeof (*sc));
2035 }
2036 
2037 static uint64_t *
2038 swctx_regptr(struct svm_regctx *regctx, int reg)
2039 {
2040 	switch (reg) {
2041 	case VM_REG_GUEST_RBX:
2042 		return (&regctx->sctx_rbx);
2043 	case VM_REG_GUEST_RCX:
2044 		return (&regctx->sctx_rcx);
2045 	case VM_REG_GUEST_RDX:
2046 		return (&regctx->sctx_rdx);
2047 	case VM_REG_GUEST_RDI:
2048 		return (&regctx->sctx_rdi);
2049 	case VM_REG_GUEST_RSI:
2050 		return (&regctx->sctx_rsi);
2051 	case VM_REG_GUEST_RBP:
2052 		return (&regctx->sctx_rbp);
2053 	case VM_REG_GUEST_R8:
2054 		return (&regctx->sctx_r8);
2055 	case VM_REG_GUEST_R9:
2056 		return (&regctx->sctx_r9);
2057 	case VM_REG_GUEST_R10:
2058 		return (&regctx->sctx_r10);
2059 	case VM_REG_GUEST_R11:
2060 		return (&regctx->sctx_r11);
2061 	case VM_REG_GUEST_R12:
2062 		return (&regctx->sctx_r12);
2063 	case VM_REG_GUEST_R13:
2064 		return (&regctx->sctx_r13);
2065 	case VM_REG_GUEST_R14:
2066 		return (&regctx->sctx_r14);
2067 	case VM_REG_GUEST_R15:
2068 		return (&regctx->sctx_r15);
2069 	case VM_REG_GUEST_DR0:
2070 		return (&regctx->sctx_dr0);
2071 	case VM_REG_GUEST_DR1:
2072 		return (&regctx->sctx_dr1);
2073 	case VM_REG_GUEST_DR2:
2074 		return (&regctx->sctx_dr2);
2075 	case VM_REG_GUEST_DR3:
2076 		return (&regctx->sctx_dr3);
2077 	default:
2078 		return (NULL);
2079 	}
2080 }
2081 
2082 static int
2083 svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
2084 {
2085 	struct svm_softc *sc;
2086 	struct vmcb *vmcb;
2087 	uint64_t *regp;
2088 	uint64_t *fieldp;
2089 	struct vmcb_segment *seg;
2090 
2091 	sc = arg;
2092 	vmcb = svm_get_vmcb(sc, vcpu);
2093 
2094 	regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
2095 	if (regp != NULL) {
2096 		*val = *regp;
2097 		return (0);
2098 	}
2099 
2100 	switch (ident) {
2101 	case VM_REG_GUEST_INTR_SHADOW:
2102 		*val = (vmcb->ctrl.intr_shadow != 0) ? 1 : 0;
2103 		break;
2104 
2105 	case VM_REG_GUEST_CR0:
2106 		svm_get_cr0(sc, vcpu, val);
2107 		break;
2108 	case VM_REG_GUEST_CR2:
2109 	case VM_REG_GUEST_CR3:
2110 	case VM_REG_GUEST_CR4:
2111 	case VM_REG_GUEST_DR6:
2112 	case VM_REG_GUEST_DR7:
2113 	case VM_REG_GUEST_EFER:
2114 	case VM_REG_GUEST_RAX:
2115 	case VM_REG_GUEST_RFLAGS:
2116 	case VM_REG_GUEST_RIP:
2117 	case VM_REG_GUEST_RSP:
2118 		fieldp = vmcb_regptr(vmcb, ident, NULL);
2119 		*val = *fieldp;
2120 		break;
2121 
2122 	case VM_REG_GUEST_CS:
2123 	case VM_REG_GUEST_DS:
2124 	case VM_REG_GUEST_ES:
2125 	case VM_REG_GUEST_FS:
2126 	case VM_REG_GUEST_GS:
2127 	case VM_REG_GUEST_SS:
2128 	case VM_REG_GUEST_LDTR:
2129 	case VM_REG_GUEST_TR:
2130 		seg = vmcb_segptr(vmcb, ident);
2131 		*val = seg->selector;
2132 		break;
2133 
2134 	case VM_REG_GUEST_GDTR:
2135 	case VM_REG_GUEST_IDTR:
2136 		/* GDTR and IDTR don't have segment selectors */
2137 		return (EINVAL);
2138 
2139 	case VM_REG_GUEST_PDPTE0:
2140 	case VM_REG_GUEST_PDPTE1:
2141 	case VM_REG_GUEST_PDPTE2:
2142 	case VM_REG_GUEST_PDPTE3:
2143 		/*
2144 		 * Unlike VMX, where the PDPTEs are explicitly cached as part of
2145 		 * several well-defined events related to paging (such as
2146 		 * loading %cr3), SVM walks the PDPEs (their PDPTE) as part of
2147 		 * nested paging lookups.  This makes these registers
2148 		 * effectively irrelevant on SVM.
2149 		 *
2150 		 * Rather than tossing an error, emit zeroed values so casual
2151 		 * consumers do not need to be as careful about that difference.
2152 		 */
2153 		*val = 0;
2154 		break;
2155 
2156 	default:
2157 		return (EINVAL);
2158 	}
2159 
2160 	return (0);
2161 }
2162 
2163 static int
2164 svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
2165 {
2166 	struct svm_softc *sc;
2167 	struct vmcb *vmcb;
2168 	uint64_t *regp;
2169 	uint64_t *fieldp;
2170 	uint32_t dirty;
2171 	struct vmcb_segment *seg;
2172 
2173 	sc = arg;
2174 	vmcb = svm_get_vmcb(sc, vcpu);
2175 
2176 	regp = swctx_regptr(svm_get_guest_regctx(sc, vcpu), ident);
2177 	if (regp != NULL) {
2178 		*regp = val;
2179 		return (0);
2180 	}
2181 
2182 	dirty = VMCB_CACHE_NONE;
2183 	switch (ident) {
2184 	case VM_REG_GUEST_INTR_SHADOW:
2185 		vmcb->ctrl.intr_shadow = (val != 0) ? 1 : 0;
2186 		break;
2187 
2188 	case VM_REG_GUEST_EFER:
2189 		fieldp = vmcb_regptr(vmcb, ident, &dirty);
2190 		/* EFER_SVM must always be set when the guest is executing */
2191 		*fieldp = val | EFER_SVM;
2192 		dirty |= VMCB_CACHE_CR;
2193 		break;
2194 
2195 	case VM_REG_GUEST_CR0:
2196 		svm_set_cr0(sc, vcpu, val, false);
2197 		break;
2198 	case VM_REG_GUEST_CR2:
2199 	case VM_REG_GUEST_CR3:
2200 	case VM_REG_GUEST_CR4:
2201 	case VM_REG_GUEST_DR6:
2202 	case VM_REG_GUEST_DR7:
2203 	case VM_REG_GUEST_RAX:
2204 	case VM_REG_GUEST_RFLAGS:
2205 	case VM_REG_GUEST_RIP:
2206 	case VM_REG_GUEST_RSP:
2207 		fieldp = vmcb_regptr(vmcb, ident, &dirty);
2208 		*fieldp = val;
2209 		break;
2210 
2211 	case VM_REG_GUEST_CS:
2212 	case VM_REG_GUEST_DS:
2213 	case VM_REG_GUEST_ES:
2214 	case VM_REG_GUEST_SS:
2215 	case VM_REG_GUEST_FS:
2216 	case VM_REG_GUEST_GS:
2217 	case VM_REG_GUEST_LDTR:
2218 	case VM_REG_GUEST_TR:
2219 		dirty |= VMCB_CACHE_SEG;
2220 		seg = vmcb_segptr(vmcb, ident);
2221 		seg->selector = (uint16_t)val;
2222 		break;
2223 
2224 	case VM_REG_GUEST_GDTR:
2225 	case VM_REG_GUEST_IDTR:
2226 		/* GDTR and IDTR don't have segment selectors */
2227 		return (EINVAL);
2228 
2229 	case VM_REG_GUEST_PDPTE0:
2230 	case VM_REG_GUEST_PDPTE1:
2231 	case VM_REG_GUEST_PDPTE2:
2232 	case VM_REG_GUEST_PDPTE3:
2233 		/*
2234 		 * PDPEs (AMD's PDPTE) are not cached under SVM, so we can
2235 		 * ignore attempts to set them.  See handler in svm_getreg() for
2236 		 * more details.
2237 		 */
2238 		break;
2239 
2240 	default:
2241 		return (EINVAL);
2242 	}
2243 
2244 	if (dirty != VMCB_CACHE_NONE) {
2245 		svm_set_dirty(sc, vcpu, dirty);
2246 	}
2247 
2248 	/*
2249 	 * XXX deal with CR3 and invalidate TLB entries tagged with the
2250 	 * vcpu's ASID. This needs to be treated differently depending on
2251 	 * whether 'running' is true/false.
2252 	 */
2253 
2254 	return (0);
2255 }
2256 
2257 static int
2258 svm_setdesc(void *arg, int vcpu, int reg, const struct seg_desc *desc)
2259 {
2260 	struct vmcb *vmcb;
2261 	struct svm_softc *sc;
2262 	struct vmcb_segment *seg;
2263 
2264 	sc = arg;
2265 	vmcb = svm_get_vmcb(sc, vcpu);
2266 
2267 	switch (reg) {
2268 	case VM_REG_GUEST_CS:
2269 	case VM_REG_GUEST_DS:
2270 	case VM_REG_GUEST_ES:
2271 	case VM_REG_GUEST_SS:
2272 	case VM_REG_GUEST_FS:
2273 	case VM_REG_GUEST_GS:
2274 	case VM_REG_GUEST_LDTR:
2275 	case VM_REG_GUEST_TR:
2276 		svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
2277 		seg = vmcb_segptr(vmcb, reg);
2278 		/*
2279 		 * Map seg_desc access to VMCB attribute format.
2280 		 *
2281 		 * SVM uses the 'P' bit in the segment attributes to indicate a
2282 		 * NULL segment so clear it if the segment is marked unusable.
2283 		 */
2284 		seg->attrib = VMCB_ACCESS2ATTR(desc->access);
2285 		if (SEG_DESC_UNUSABLE(desc->access)) {
2286 			seg->attrib &= ~0x80;
2287 		}
2288 		/*
2289 		 * Keep CPL synced with the DPL specified for %ss.
2290 		 *
2291 		 * KVM notes that a SYSRET to non-cpl-3 is possible on AMD
2292 		 * (unlike Intel), but accepts such a possible deviation for
2293 		 * what is otherwise unreasonable behavior for a guest OS, since
2294 		 * they do the same synchronization.
2295 		 */
2296 		if (reg == VM_REG_GUEST_SS) {
2297 			vmcb->state.cpl = SEG_DESC_DPL(desc->access);
2298 		}
2299 		break;
2300 
2301 	case VM_REG_GUEST_GDTR:
2302 	case VM_REG_GUEST_IDTR:
2303 		svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
2304 		seg = vmcb_segptr(vmcb, reg);
2305 		break;
2306 
2307 	default:
2308 		return (EINVAL);
2309 	}
2310 
2311 	ASSERT(seg != NULL);
2312 	seg->base = desc->base;
2313 	seg->limit = desc->limit;
2314 
2315 	return (0);
2316 }
2317 
2318 static int
2319 svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2320 {
2321 	struct vmcb *vmcb;
2322 	struct svm_softc *sc;
2323 	struct vmcb_segment *seg;
2324 
2325 	sc = arg;
2326 	vmcb = svm_get_vmcb(sc, vcpu);
2327 
2328 	switch (reg) {
2329 	case VM_REG_GUEST_DS:
2330 	case VM_REG_GUEST_ES:
2331 	case VM_REG_GUEST_FS:
2332 	case VM_REG_GUEST_GS:
2333 	case VM_REG_GUEST_SS:
2334 	case VM_REG_GUEST_LDTR:
2335 		seg = vmcb_segptr(vmcb, reg);
2336 		desc->access = VMCB_ATTR2ACCESS(seg->attrib);
2337 		/*
2338 		 * VT-x uses bit 16 to indicate a segment that has been loaded
2339 		 * with a NULL selector (aka unusable). The 'desc->access'
2340 		 * field is interpreted in the VT-x format by the
2341 		 * processor-independent code.
2342 		 *
2343 		 * SVM uses the 'P' bit to convey the same information so
2344 		 * convert it into the VT-x format. For more details refer to
2345 		 * section "Segment State in the VMCB" in APMv2.
2346 		 */
2347 		if ((desc->access & 0x80) == 0) {
2348 			/* Unusable segment */
2349 			desc->access |= 0x10000;
2350 		}
2351 		break;
2352 
2353 	case VM_REG_GUEST_CS:
2354 	case VM_REG_GUEST_TR:
2355 		seg = vmcb_segptr(vmcb, reg);
2356 		desc->access = VMCB_ATTR2ACCESS(seg->attrib);
2357 		break;
2358 
2359 	case VM_REG_GUEST_GDTR:
2360 	case VM_REG_GUEST_IDTR:
2361 		seg = vmcb_segptr(vmcb, reg);
2362 		/*
2363 		 * Since there are no access bits associated with the GDTR or
2364 		 * the IDTR, zero out the field to ensure it does not contain
2365 		 * garbage which might confuse the consumer.
2366 		 */
2367 		desc->access = 0;
2368 		break;
2369 
2370 	default:
2371 		return (EINVAL);
2372 	}
2373 
2374 	ASSERT(seg != NULL);
2375 	desc->base = seg->base;
2376 	desc->limit = seg->limit;
2377 	return (0);
2378 }
2379 
2380 static int
2381 svm_get_msr(void *arg, int vcpu, uint32_t msr, uint64_t *valp)
2382 {
2383 	struct svm_softc *sc = arg;
2384 	struct vmcb *vmcb = svm_get_vmcb(sc, vcpu);
2385 	const uint64_t *msrp = vmcb_msr_ptr(vmcb, msr, NULL);
2386 
2387 	if (msrp != NULL) {
2388 		*valp = *msrp;
2389 		return (0);
2390 	}
2391 
2392 	return (EINVAL);
2393 }
2394 
2395 static int
2396 svm_set_msr(void *arg, int vcpu, uint32_t msr, uint64_t val)
2397 {
2398 	struct svm_softc *sc = arg;
2399 	struct vmcb *vmcb = svm_get_vmcb(sc, vcpu);
2400 
2401 	uint32_t dirty = 0;
2402 	uint64_t *msrp = vmcb_msr_ptr(vmcb, msr, &dirty);
2403 	if (msrp == NULL) {
2404 		return (EINVAL);
2405 	}
2406 	switch (msr) {
2407 	case MSR_EFER:
2408 		/*
2409 		 * For now, just clone the logic from
2410 		 * svm_setreg():
2411 		 *
2412 		 * EFER_SVM must always be set when the guest is
2413 		 * executing
2414 		 */
2415 		*msrp = val | EFER_SVM;
2416 		break;
2417 	/* TODO: other necessary MSR masking */
2418 	default:
2419 		*msrp = val;
2420 		break;
2421 	}
2422 	if (dirty != 0) {
2423 		svm_set_dirty(sc, vcpu, dirty);
2424 	}
2425 	return (0);
2426 
2427 }
2428 
2429 static int
2430 svm_setcap(void *arg, int vcpu, int type, int val)
2431 {
2432 	struct svm_softc *sc;
2433 	int error;
2434 
2435 	sc = arg;
2436 	error = 0;
2437 	switch (type) {
2438 	case VM_CAP_HALT_EXIT:
2439 		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2440 		    VMCB_INTCPT_HLT, val);
2441 		break;
2442 	case VM_CAP_PAUSE_EXIT:
2443 		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2444 		    VMCB_INTCPT_PAUSE, val);
2445 		break;
2446 	default:
2447 		error = ENOENT;
2448 		break;
2449 	}
2450 	return (error);
2451 }
2452 
2453 static int
2454 svm_getcap(void *arg, int vcpu, int type, int *retval)
2455 {
2456 	struct svm_softc *sc;
2457 	int error;
2458 
2459 	sc = arg;
2460 	error = 0;
2461 
2462 	switch (type) {
2463 	case VM_CAP_HALT_EXIT:
2464 		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2465 		    VMCB_INTCPT_HLT);
2466 		break;
2467 	case VM_CAP_PAUSE_EXIT:
2468 		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2469 		    VMCB_INTCPT_PAUSE);
2470 		break;
2471 	default:
2472 		error = ENOENT;
2473 		break;
2474 	}
2475 	return (error);
2476 }
2477 
2478 static struct vlapic *
2479 svm_vlapic_init(void *arg, int vcpuid)
2480 {
2481 	struct svm_softc *svm_sc;
2482 	struct vlapic *vlapic;
2483 
2484 	svm_sc = arg;
2485 	vlapic = kmem_zalloc(sizeof (struct vlapic), KM_SLEEP);
2486 	vlapic->vm = svm_sc->vm;
2487 	vlapic->vcpuid = vcpuid;
2488 	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
2489 
2490 	vlapic_init(vlapic);
2491 
2492 	return (vlapic);
2493 }
2494 
2495 static void
2496 svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
2497 {
2498 	vlapic_cleanup(vlapic);
2499 	kmem_free(vlapic, sizeof (struct vlapic));
2500 }
2501 
2502 static void
2503 svm_savectx(void *arg, int vcpu)
2504 {
2505 	struct svm_softc *sc = arg;
2506 
2507 	if (sc->vcpu[vcpu].loaded) {
2508 		svm_msr_guest_exit(sc, vcpu);
2509 	}
2510 }
2511 
2512 static void
2513 svm_restorectx(void *arg, int vcpu)
2514 {
2515 	struct svm_softc *sc = arg;
2516 
2517 	if (sc->vcpu[vcpu].loaded) {
2518 		svm_msr_guest_enter(sc, vcpu);
2519 	}
2520 }
2521 
2522 struct vmm_ops vmm_ops_amd = {
2523 	.init		= svm_init,
2524 	.cleanup	= svm_cleanup,
2525 	.resume		= svm_restore,
2526 
2527 	.vminit		= svm_vminit,
2528 	.vmrun		= svm_vmrun,
2529 	.vmcleanup	= svm_vmcleanup,
2530 	.vmgetreg	= svm_getreg,
2531 	.vmsetreg	= svm_setreg,
2532 	.vmgetdesc	= svm_getdesc,
2533 	.vmsetdesc	= svm_setdesc,
2534 	.vmgetcap	= svm_getcap,
2535 	.vmsetcap	= svm_setcap,
2536 	.vlapic_init	= svm_vlapic_init,
2537 	.vlapic_cleanup	= svm_vlapic_cleanup,
2538 
2539 	.vmsavectx	= svm_savectx,
2540 	.vmrestorectx	= svm_restorectx,
2541 
2542 	.vmgetmsr	= svm_get_msr,
2543 	.vmsetmsr	= svm_set_msr,
2544 };
2545