1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 #include "opt_bhyve_snapshot.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/smp.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/pcpu.h>
38 #include <sys/proc.h>
39 #include <sys/reg.h>
40 #include <sys/smr.h>
41 #include <sys/sysctl.h>
42
43 #include <vm/vm.h>
44 #include <vm/vm_extern.h>
45 #include <vm/pmap.h>
46
47 #include <machine/cpufunc.h>
48 #include <machine/psl.h>
49 #include <machine/md_var.h>
50 #include <machine/specialreg.h>
51 #include <machine/smp.h>
52 #include <machine/vmm.h>
53 #include <machine/vmm_dev.h>
54 #include <machine/vmm_instruction_emul.h>
55 #include <machine/vmm_snapshot.h>
56
57 #include "vmm_lapic.h"
58 #include "vmm_stat.h"
59 #include "vmm_ktr.h"
60 #include "vmm_ioport.h"
61 #include "vatpic.h"
62 #include "vlapic.h"
63 #include "vlapic_priv.h"
64
65 #include "x86.h"
66 #include "vmcb.h"
67 #include "svm.h"
68 #include "svm_softc.h"
69 #include "svm_msr.h"
70 #include "npt.h"
71
72 SYSCTL_DECL(_hw_vmm);
73 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
74 NULL);
75
76 /*
77 * SVM CPUID function 0x8000_000A, edx bit decoding.
78 */
79 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */
80 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */
81 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */
82 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */
83 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */
84 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */
85 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */
86 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */
87 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */
88 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */
89 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */
90
91 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \
92 VMCB_CACHE_IOPM | \
93 VMCB_CACHE_I | \
94 VMCB_CACHE_TPR | \
95 VMCB_CACHE_CR2 | \
96 VMCB_CACHE_CR | \
97 VMCB_CACHE_DR | \
98 VMCB_CACHE_DT | \
99 VMCB_CACHE_SEG | \
100 VMCB_CACHE_NP)
101
102 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
103 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
104 0, NULL);
105
106 static MALLOC_DEFINE(M_SVM, "svm", "svm");
107 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
108
109 static uint32_t svm_feature = ~0U; /* AMD SVM features. */
110 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
111 "SVM features advertised by CPUID.8000000AH:EDX");
112
113 static int disable_npf_assist;
114 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
115 &disable_npf_assist, 0, NULL);
116
117 /* Maximum ASIDs supported by the processor */
118 static uint32_t nasid;
119 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
120 "Number of ASIDs supported by this processor");
121
122 /* Current ASID generation for each host cpu */
123 static struct asid asid[MAXCPU];
124
125 /* SVM host state saved area of size 4KB for each physical core. */
126 static uint8_t *hsave;
127
128 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
129 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
130 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
131
132 static int svm_getdesc(void *vcpui, int reg, struct seg_desc *desc);
133 static int svm_setreg(void *vcpui, int ident, uint64_t val);
134 static int svm_getreg(void *vcpui, int ident, uint64_t *val);
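/*
 * Return non-zero if the processor can flush TLB entries for an individual
 * ASID (CPUID Fn8000_000A EDX[6], "Flush by ASID").
 */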
135 static __inline int
136 flush_by_asid(void)
137 {
138
139 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
140 }
141
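/*
 * Return non-zero if the processor provides instruction decode assists
 * (CPUID Fn8000_000A EDX[7]); used for IN/OUT string and #NPF emulation.
 */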
142 static __inline int
143 decode_assist(void)
144 {
145
146 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
147 }
148
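/* Clear EFER.SVME on the current CPU; called via smp_rendezvous() at module unload. */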
149 static void
150 svm_disable(void *arg __unused)
151 {
152 uint64_t efer;
153
154 efer = rdmsr(MSR_EFER);
155 efer &= ~EFER_SVM;
156 wrmsr(MSR_EFER, efer);
157 }
158
159 /*
160 * Disable SVM on all CPUs.
161 */
162 static int
163 svm_modcleanup(void)
164 {
165
166 smp_rendezvous(NULL, svm_disable, NULL, NULL);
167
168 if (hsave != NULL)
169 kmem_free(hsave, (mp_maxid + 1) * PAGE_SIZE);
170
171 return (0);
172 }
173
174 /*
175 * Verify that all the features required by bhyve are available.
176 */
177 static int
178 check_svm_features(void)
179 {
180 u_int regs[4];
181
182 /* CPUID Fn8000_000A is for SVM */
183 do_cpuid(0x8000000A, regs);
184 svm_feature &= regs[3];
185
186 /*
187 * The number of ASIDs can be configured to be less than what is
188 * supported by the hardware but not more.
189 */
190 if (nasid == 0 || nasid > regs[1])
191 nasid = regs[1];
192 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));
193
194 /* bhyve requires the Nested Paging feature */
195 if (!(svm_feature & AMD_CPUID_SVM_NP)) {
196 printf("SVM: Nested Paging feature not available.\n");
197 return (ENXIO);
198 }
199
200 /* bhyve requires the NRIP Save feature */
201 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
202 printf("SVM: NRIP Save feature not available.\n");
203 return (ENXIO);
204 }
205
206 return (0);
207 }
208
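/*
 * Set EFER.SVME on the current CPU and point MSR_VM_HSAVE_PA at this CPU's
 * host state-save area.
 */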
209 static void
210 svm_enable(void *arg __unused)
211 {
212 uint64_t efer;
213
214 efer = rdmsr(MSR_EFER);
215 efer |= EFER_SVM;
216 wrmsr(MSR_EFER, efer);
217
218 wrmsr(MSR_VM_HSAVE_PA, vtophys(&hsave[curcpu * PAGE_SIZE]));
219 }
220
221 /*
222 * Return 1 if SVM is enabled on this processor and 0 otherwise.
223 */
224 static int
225 svm_available(void)
226 {
227 uint64_t msr;
228
229 /* Section 15.4 Enabling SVM from APM2. */
230 if ((amd_feature2 & AMDID2_SVM) == 0) {
231 printf("SVM: not available.\n");
232 return (0);
233 }
234
235 msr = rdmsr(MSR_VM_CR);
236 if ((msr & VM_CR_SVMDIS) != 0) {
237 printf("SVM: disabled by BIOS.\n");
238 return (0);
239 }
240
241 return (1);
242 }
243
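/*
 * Module initialization: check for SVM support, initialize the per-CPU ASID
 * state, allocate the host state-save areas and enable SVM on all CPUs.
 */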
244 static int
245 svm_modinit(int ipinum)
246 {
247 int error, cpu;
248
249 if (!svm_available())
250 return (ENXIO);
251
252 error = check_svm_features();
253 if (error)
254 return (error);
255
256 vmcb_clean &= VMCB_CACHE_DEFAULT;
257
258 for (cpu = 0; cpu < MAXCPU; cpu++) {
259 /*
260 * Initialize the host ASIDs to their "highest" valid values.
261 *
262 * The next ASID allocation will rollover both 'gen' and 'num'
263 * and start off the sequence at {1,1}.
264 */
265 asid[cpu].gen = ~0UL;
266 asid[cpu].num = nasid - 1;
267 }
268
269 svm_msr_init();
270 svm_npt_init(ipinum);
271
272 /* Enable SVM on all CPUs */
273 hsave = kmem_malloc((mp_maxid + 1) * PAGE_SIZE, M_WAITOK | M_ZERO);
274 smp_rendezvous(NULL, svm_enable, NULL, NULL);
275
276 return (0);
277 }
278
279 static void
280 svm_modresume(void)
281 {
282
283 svm_enable(NULL);
284 }
285
286 #ifdef BHYVE_SNAPSHOT
287 void
288 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)
289 {
290 struct vmcb_ctrl *ctrl;
291
292 ctrl = svm_get_vmcb_ctrl(vcpu);
293 ctrl->tsc_offset = offset;
294
295 svm_set_dirty(vcpu, VMCB_CACHE_I);
296 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset);
297
298 vm_set_tsc_offset(vcpu->vcpu, offset);
299 }
300 #endif
301
302 /* Pentium compatible MSRs */
303 #define MSR_PENTIUM_START 0
304 #define MSR_PENTIUM_END 0x1FFF
305 /* AMD 6th generation and Intel compatible MSRs */
306 #define MSR_AMD6TH_START 0xC0000000UL
307 #define MSR_AMD6TH_END 0xC0001FFFUL
308 /* AMD 7th and 8th generation compatible MSRs */
309 #define MSR_AMD7TH_START 0xC0010000UL
310 #define MSR_AMD7TH_END 0xC0011FFFUL
311
312 /*
313 * Get the index and bit position for a MSR in permission bitmap.
314 * Two bits are used for each MSR: lower bit for read and higher bit for write.
315 */
316 static int
317 svm_msr_index(uint64_t msr, int *index, int *bit)
318 {
319 uint32_t base, off;
320
321 *index = -1;
322 *bit = (msr % 4) * 2;
323 base = 0;
324
325 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
326 *index = msr / 4;
327 return (0);
328 }
329
330 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
331 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
332 off = (msr - MSR_AMD6TH_START);
333 *index = (off + base) / 4;
334 return (0);
335 }
336
337 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
338 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
339 off = (msr - MSR_AMD7TH_START);
340 *index = (off + base) / 4;
341 return (0);
342 }
343
344 return (EINVAL);
345 }
346
347 /*
348 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
349 */
350 static void
351 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
352 {
353 int index, bit, error __diagused;
354
355 error = svm_msr_index(msr, &index, &bit);
356 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
357 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
358 ("%s: invalid index %d for msr %#lx", __func__, index, msr));
359 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
360 "msr %#lx", __func__, bit, msr));
361
362 if (read)
363 perm_bitmap[index] &= ~(1UL << bit);
364
365 if (write)
366 perm_bitmap[index] &= ~(2UL << bit);
367 }
368
369 static void
370 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
371 {
372
373 svm_msr_perm(perm_bitmap, msr, true, true);
374 }
375
376 static void
377 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
378 {
379
380 svm_msr_perm(perm_bitmap, msr, true, false);
381 }
382
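/*
 * Helpers to query and modify one bit in the VMCB intercept vector 'idx'.
 * Any change marks the intercept state dirty so the cached VMCB copy is
 * invalidated before the next VMRUN.
 */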
383 static __inline int
384 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask)
385 {
386 struct vmcb_ctrl *ctrl;
387
388 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
389
390 ctrl = svm_get_vmcb_ctrl(vcpu);
391 return (ctrl->intercept[idx] & bitmask ? 1 : 0);
392 }
393
394 static __inline void
395 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled)
396 {
397 struct vmcb_ctrl *ctrl;
398 uint32_t oldval;
399
400 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
401
402 ctrl = svm_get_vmcb_ctrl(vcpu);
403 oldval = ctrl->intercept[idx];
404
405 if (enabled)
406 ctrl->intercept[idx] |= bitmask;
407 else
408 ctrl->intercept[idx] &= ~bitmask;
409
410 if (ctrl->intercept[idx] != oldval) {
411 svm_set_dirty(vcpu, VMCB_CACHE_I);
412 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx,
413 oldval, ctrl->intercept[idx]);
414 }
415 }
416
417 static __inline void
418 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
419 {
420
421 svm_set_intercept(vcpu, off, bitmask, 0);
422 }
423
424 static __inline void
425 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
426 {
427
428 svm_set_intercept(vcpu, off, bitmask, 1);
429 }
430
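/*
 * Initialize a vcpu's VMCB: permission bitmaps, nested paging, control
 * register/exception/instruction intercepts and power-on register state.
 */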
431 static void
432 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
433 uint64_t msrpm_base_pa, uint64_t np_pml4)
434 {
435 struct vmcb_ctrl *ctrl;
436 struct vmcb_state *state;
437 uint32_t mask;
438 int n;
439
440 ctrl = svm_get_vmcb_ctrl(vcpu);
441 state = svm_get_vmcb_state(vcpu);
442
443 ctrl->iopm_base_pa = iopm_base_pa;
444 ctrl->msrpm_base_pa = msrpm_base_pa;
445
446 /* Enable nested paging */
447 ctrl->np_enable = 1;
448 ctrl->n_cr3 = np_pml4;
449
450 /*
451 * Intercept accesses to the control registers that are not shadowed
452 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
453 */
454 for (n = 0; n < 16; n++) {
455 mask = (BIT(n) << 16) | BIT(n);
456 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
457 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask);
458 else
459 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask);
460 }
461
462 /*
463 * Intercept everything when tracing guest exceptions otherwise
464 * just intercept machine check exception.
465 */
466 if (vcpu_trace_exceptions(vcpu->vcpu)) {
467 for (n = 0; n < 32; n++) {
468 /*
469 * Skip unimplemented vectors in the exception bitmap.
470 */
471 if (n == 2 || n == 9) {
472 continue;
473 }
474 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n));
475 }
476 } else {
477 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
478 }
479
480 /* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
481 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
482 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
483 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
484 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
485 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
486 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
487 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
488 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
489 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE);
490 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
491 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
492
493 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
494 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
495
496 /*
497 * Intercept the SVM instructions since they are enabled for guests by default.
498 * VMMCALL is not intercepted because a non-intercepted VMMCALL already raises #UD.
499 */
500 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
501 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
502 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
503 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
504 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
505 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
506 if (vcpu_trap_wbinvd(vcpu->vcpu)) {
507 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
508 VMCB_INTCPT_WBINVD);
509 }
510
511 /*
512 * From section "Canonicalization and Consistency Checks" in APMv2
513 * the VMRUN intercept bit must be set to pass the consistency check.
514 */
515 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
516
517 /*
518 * The ASID will be set to a non-zero value just before VMRUN.
519 */
520 ctrl->asid = 0;
521
522 /*
523 * Section 15.21.1, Interrupt Masking in EFLAGS
524 * Section 15.21.2, Virtualizing APIC.TPR
525 *
526 * This must be set for %rflag and %cr8 isolation of guest and host.
527 */
528 ctrl->v_intr_masking = 1;
529
530 /* Enable Last Branch Record aka LBR for debugging */
531 ctrl->lbr_virt_en = 1;
532 state->dbgctl = BIT(0);
533
534 /* EFER_SVM must always be set when the guest is executing */
535 state->efer = EFER_SVM;
536
537 /* Set up the PAT to power-on state */
538 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) |
539 PAT_VALUE(1, PAT_WRITE_THROUGH) |
540 PAT_VALUE(2, PAT_UNCACHED) |
541 PAT_VALUE(3, PAT_UNCACHEABLE) |
542 PAT_VALUE(4, PAT_WRITE_BACK) |
543 PAT_VALUE(5, PAT_WRITE_THROUGH) |
544 PAT_VALUE(6, PAT_UNCACHED) |
545 PAT_VALUE(7, PAT_UNCACHEABLE);
546
547 /* Set up DR6/7 to power-on state */
548 state->dr6 = DBREG_DR6_RESERVED1;
549 state->dr7 = DBREG_DR7_RESERVED1;
550 }
551
552 /*
553 * Initialize a virtual machine.
554 */
555 static void *
556 svm_init(struct vm *vm, pmap_t pmap)
557 {
558 struct svm_softc *svm_sc;
559
560 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
561
562 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
563 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
564 if (svm_sc->msr_bitmap == NULL)
565 panic("contigmalloc of SVM MSR bitmap failed");
566 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
567 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
568 if (svm_sc->iopm_bitmap == NULL)
569 panic("contigmalloc of SVM IO bitmap failed");
570
571 svm_sc->vm = vm;
572 svm_sc->nptp = vtophys(pmap->pm_pmltop);
573
574 /*
575 * Intercept read and write accesses to all MSRs.
576 */
577 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);
578
579 /*
580 * Access to the following MSRs is redirected to the VMCB when the
581 * guest is executing. Therefore it is safe to allow the guest to
582 * read/write these MSRs directly without hypervisor involvement.
583 */
584 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
585 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
586 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
587
588 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
589 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
592 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
595 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
596
597 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
598
599 /*
600 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
601 */
602 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
603
604 /* Intercept access to all I/O ports. */
605 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);
606
607 return (svm_sc);
608 }
609
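/*
 * Allocate and initialize per-vcpu state, including a page-aligned VMCB.
 */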
610 static void *
611 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
612 {
613 struct svm_softc *sc = vmi;
614 struct svm_vcpu *vcpu;
615
616 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO);
617 vcpu->sc = sc;
618 vcpu->vcpu = vcpu1;
619 vcpu->vcpuid = vcpuid;
620 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM,
621 M_WAITOK | M_ZERO);
622 vcpu->nextrip = ~0;
623 vcpu->lastcpu = NOCPU;
624 vcpu->vmcb_pa = vtophys(vcpu->vmcb);
625 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap),
626 sc->nptp);
627 svm_msr_guest_init(sc, vcpu);
628 return (vcpu);
629 }
630
631 /*
632 * Collateral for a generic SVM VM-exit.
633 */
634 static void
635 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
636 {
637
638 vme->exitcode = VM_EXITCODE_SVM;
639 vme->u.svm.exitcode = code;
640 vme->u.svm.exitinfo1 = info1;
641 vme->u.svm.exitinfo2 = info2;
642 }
643
644 static int
645 svm_cpl(struct vmcb_state *state)
646 {
647
648 /*
649 * From APMv2:
650 * "Retrieve the CPL from the CPL field in the VMCB, not
651 * from any segment DPL"
652 */
653 return (state->cpl);
654 }
655
656 static enum vm_cpu_mode
657 svm_vcpu_mode(struct vmcb *vmcb)
658 {
659 struct vmcb_segment seg;
660 struct vmcb_state *state;
661 int error __diagused;
662
663 state = &vmcb->state;
664
665 if (state->efer & EFER_LMA) {
666 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
667 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
668 error));
669
670 /*
671 * Section 4.8.1 for APM2, check if Code Segment has
672 * Long attribute set in descriptor.
673 */
674 if (seg.attrib & VMCB_CS_ATTRIB_L)
675 return (CPU_MODE_64BIT);
676 else
677 return (CPU_MODE_COMPATIBILITY);
678 } else if (state->cr0 & CR0_PE) {
679 return (CPU_MODE_PROTECTED);
680 } else {
681 return (CPU_MODE_REAL);
682 }
683 }
684
685 static enum vm_paging_mode
686 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
687 {
688
689 if ((cr0 & CR0_PG) == 0)
690 return (PAGING_MODE_FLAT);
691 if ((cr4 & CR4_PAE) == 0)
692 return (PAGING_MODE_32);
693 if (efer & EFER_LME)
694 return (PAGING_MODE_64);
695 else
696 return (PAGING_MODE_PAE);
697 }
698
699 /*
700 * ins/outs utility routines
701 */
702 static uint64_t
703 svm_inout_str_index(struct svm_regctx *regs, int in)
704 {
705 uint64_t val;
706
707 val = in ? regs->sctx_rdi : regs->sctx_rsi;
708
709 return (val);
710 }
711
712 static uint64_t
713 svm_inout_str_count(struct svm_regctx *regs, int rep)
714 {
715 uint64_t val;
716
717 val = rep ? regs->sctx_rcx : 1;
718
719 return (val);
720 }
721
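/*
 * Determine the segment used by an INS/OUTS instruction: %es for INS,
 * otherwise the segment encoded in EXITINFO1[12:10].
 */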
722 static void
723 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in,
724 struct vm_inout_str *vis)
725 {
726 int error __diagused, s;
727
728 if (in) {
729 vis->seg_name = VM_REG_GUEST_ES;
730 } else {
731 /* The segment field has standard encoding */
732 s = (info1 >> 10) & 0x7;
733 vis->seg_name = vm_segment_name(s);
734 }
735
736 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
737 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
738 }
739
740 static int
741 svm_inout_str_addrsize(uint64_t info1)
742 {
743 uint32_t size;
744
745 size = (info1 >> 7) & 0x7;
746 switch (size) {
747 case 1:
748 return (2); /* 16 bit */
749 case 2:
750 return (4); /* 32 bit */
751 case 4:
752 return (8); /* 64 bit */
753 default:
754 panic("%s: invalid size encoding %d", __func__, size);
755 }
756 }
757
758 static void
759 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
760 {
761 struct vmcb_state *state;
762
763 state = &vmcb->state;
764 paging->cr3 = state->cr3;
765 paging->cpl = svm_cpl(state);
766 paging->cpu_mode = svm_vcpu_mode(vmcb);
767 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
768 state->efer);
769 }
770
771 #define UNHANDLED 0
772
773 /*
774 * Handle guest I/O intercept.
775 */
776 static int
777 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit)
778 {
779 struct vmcb_ctrl *ctrl;
780 struct vmcb_state *state;
781 struct svm_regctx *regs;
782 struct vm_inout_str *vis;
783 uint64_t info1;
784 int inout_string;
785
786 state = svm_get_vmcb_state(vcpu);
787 ctrl = svm_get_vmcb_ctrl(vcpu);
788 regs = svm_get_guest_regctx(vcpu);
789
790 info1 = ctrl->exitinfo1;
791 inout_string = info1 & BIT(2) ? 1 : 0;
792
793 /*
794 * The effective segment number in EXITINFO1[12:10] is populated
795 * only if the processor has the DecodeAssist capability.
796 *
797 * XXX this is not specified explicitly in APMv2 but can be verified
798 * empirically.
799 */
800 if (inout_string && !decode_assist())
801 return (UNHANDLED);
802
803 vmexit->exitcode = VM_EXITCODE_INOUT;
804 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
805 vmexit->u.inout.string = inout_string;
806 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
807 vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
808 vmexit->u.inout.port = (uint16_t)(info1 >> 16);
809 vmexit->u.inout.eax = (uint32_t)(state->rax);
810
811 if (inout_string) {
812 vmexit->exitcode = VM_EXITCODE_INOUT_STR;
813 vis = &vmexit->u.inout_str;
814 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging);
815 vis->rflags = state->rflags;
816 vis->cr0 = state->cr0;
817 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
818 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
819 vis->addrsize = svm_inout_str_addrsize(info1);
820 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis);
821 }
822
823 return (UNHANDLED);
824 }
825
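/* Translate the nested page fault EXITINFO1 bits into a VM_PROT_* fault type. */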
826 static int
827 npf_fault_type(uint64_t exitinfo1)
828 {
829
830 if (exitinfo1 & VMCB_NPF_INFO1_W)
831 return (VM_PROT_WRITE);
832 else if (exitinfo1 & VMCB_NPF_INFO1_ID)
833 return (VM_PROT_EXECUTE);
834 else
835 return (VM_PROT_READ);
836 }
837
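/*
 * A nested page fault is a candidate for instruction emulation only if it was
 * a data access (not an instruction fetch), the final guest physical address
 * is valid, and the fault did not occur during the guest page-table walk.
 */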
838 static bool
839 svm_npf_emul_fault(uint64_t exitinfo1)
840 {
841
842 if (exitinfo1 & VMCB_NPF_INFO1_ID) {
843 return (false);
844 }
845
846 if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
847 return (false);
848 }
849
850 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
851 return (false);
852 }
853
854 return (true);
855 }
856
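/*
 * Set up an instruction-emulation exit: record the faulting GPA, the CS
 * base/attributes for the current CPU mode and, when decode assist is
 * available, the pre-decoded instruction bytes.
 */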
857 static void
858 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
859 {
860 struct vm_guest_paging *paging;
861 struct vmcb_segment seg;
862 struct vmcb_ctrl *ctrl;
863 char *inst_bytes;
864 int error __diagused, inst_len;
865
866 ctrl = &vmcb->ctrl;
867 paging = &vmexit->u.inst_emul.paging;
868
869 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
870 vmexit->u.inst_emul.gpa = gpa;
871 vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
872 svm_paging_info(vmcb, paging);
873
874 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
875 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));
876
877 switch(paging->cpu_mode) {
878 case CPU_MODE_REAL:
879 vmexit->u.inst_emul.cs_base = seg.base;
880 vmexit->u.inst_emul.cs_d = 0;
881 break;
882 case CPU_MODE_PROTECTED:
883 case CPU_MODE_COMPATIBILITY:
884 vmexit->u.inst_emul.cs_base = seg.base;
885
886 /*
887 * Section 4.8.1 of APM2, Default Operand Size or D bit.
888 */
889 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
890 1 : 0;
891 break;
892 default:
893 vmexit->u.inst_emul.cs_base = 0;
894 vmexit->u.inst_emul.cs_d = 0;
895 break;
896 }
897
898 /*
899 * Copy the instruction bytes into 'vie' if available.
900 */
901 if (decode_assist() && !disable_npf_assist) {
902 inst_len = ctrl->inst_len;
903 inst_bytes = ctrl->inst_bytes;
904 } else {
905 inst_len = 0;
906 inst_bytes = NULL;
907 }
908 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
909 }
910
911 #ifdef KTR
912 static const char *
913 intrtype_to_str(int intr_type)
914 {
915 switch (intr_type) {
916 case VMCB_EVENTINJ_TYPE_INTR:
917 return ("hwintr");
918 case VMCB_EVENTINJ_TYPE_NMI:
919 return ("nmi");
920 case VMCB_EVENTINJ_TYPE_INTn:
921 return ("swintr");
922 case VMCB_EVENTINJ_TYPE_EXCEPTION:
923 return ("exception");
924 default:
925 panic("%s: unknown intr_type %d", __func__, intr_type);
926 }
927 }
928 #endif
929
930 /*
931 * Inject an event to vcpu as described in section 15.20, "Event injection".
932 */
933 static void
934 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector,
935 uint32_t error, bool ec_valid)
936 {
937 struct vmcb_ctrl *ctrl;
938
939 ctrl = svm_get_vmcb_ctrl(vcpu);
940
941 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
942 ("%s: event already pending %#lx", __func__, ctrl->eventinj));
943
944 KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d",
945 __func__, vector));
946
947 switch (intr_type) {
948 case VMCB_EVENTINJ_TYPE_INTR:
949 case VMCB_EVENTINJ_TYPE_NMI:
950 case VMCB_EVENTINJ_TYPE_INTn:
951 break;
952 case VMCB_EVENTINJ_TYPE_EXCEPTION:
953 if (vector >= 0 && vector <= 31 && vector != 2)
954 break;
955 /* FALLTHROUGH */
956 default:
957 panic("%s: invalid intr_type/vector: %d/%d", __func__,
958 intr_type, vector);
959 }
960 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
961 if (ec_valid) {
962 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
963 ctrl->eventinj |= (uint64_t)error << 32;
964 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x",
965 intrtype_to_str(intr_type), vector, error);
966 } else {
967 SVM_CTR2(vcpu, "Injecting %s at vector %d",
968 intrtype_to_str(intr_type), vector);
969 }
970 }
971
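/* Propagate the hardware-maintained V_TPR back into the emulated local APIC. */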
972 static void
973 svm_update_virqinfo(struct svm_vcpu *vcpu)
974 {
975 struct vlapic *vlapic;
976 struct vmcb_ctrl *ctrl;
977
978 vlapic = vm_lapic(vcpu->vcpu);
979 ctrl = svm_get_vmcb_ctrl(vcpu);
980
981 /* Update %cr8 in the emulated vlapic */
982 vlapic_set_cr8(vlapic, ctrl->v_tpr);
983
984 /* Virtual interrupt injection is not used. */
985 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
986 "v_intr_vector %d", __func__, ctrl->v_intr_vector));
987 }
988
989 static void
990 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
991 {
992 struct vmcb_ctrl *ctrl;
993 uint64_t intinfo;
994
995 ctrl = svm_get_vmcb_ctrl(vcpu);
996 intinfo = ctrl->exitintinfo;
997 if (!VMCB_EXITINTINFO_VALID(intinfo))
998 return;
999
1000 /*
1001 * From APMv2, Section "Intercepts during IDT interrupt delivery"
1002 *
1003 * If a #VMEXIT happened during event delivery then record the event
1004 * that was being delivered.
1005 */
1006 SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
1007 VMCB_EXITINTINFO_VECTOR(intinfo));
1008 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
1009 vm_exit_intinfo(vcpu->vcpu, intinfo);
1010 }
1011
1012 #ifdef INVARIANTS
1013 static __inline int
1014 vintr_intercept_enabled(struct svm_vcpu *vcpu)
1015 {
1016
1017 return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR));
1018 }
1019 #endif
1020
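/*
 * Request a #VMEXIT as soon as the guest can accept an interrupt by
 * programming a dummy V_IRQ (vector 0, TPR ignored) and intercepting VINTR.
 */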
1021 static __inline void
1022 enable_intr_window_exiting(struct svm_vcpu *vcpu)
1023 {
1024 struct vmcb_ctrl *ctrl;
1025
1026 ctrl = svm_get_vmcb_ctrl(vcpu);
1027
1028 if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
1029 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
1030 KASSERT(vintr_intercept_enabled(vcpu),
1031 ("%s: vintr intercept should be enabled", __func__));
1032 return;
1033 }
1034
1035 SVM_CTR0(vcpu, "Enable intr window exiting");
1036 ctrl->v_irq = 1;
1037 ctrl->v_ign_tpr = 1;
1038 ctrl->v_intr_vector = 0;
1039 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1040 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1041 }
1042
1043 static __inline void
1044 disable_intr_window_exiting(struct svm_vcpu *vcpu)
1045 {
1046 struct vmcb_ctrl *ctrl;
1047
1048 ctrl = svm_get_vmcb_ctrl(vcpu);
1049
1050 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
1051 KASSERT(!vintr_intercept_enabled(vcpu),
1052 ("%s: vintr intercept should be disabled", __func__));
1053 return;
1054 }
1055
1056 SVM_CTR0(vcpu, "Disable intr window exiting");
1057 ctrl->v_irq = 0;
1058 ctrl->v_intr_vector = 0;
1059 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1060 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1061 }
1062
1063 static int
1064 svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val)
1065 {
1066 struct vmcb_ctrl *ctrl;
1067 int oldval, newval;
1068
1069 ctrl = svm_get_vmcb_ctrl(vcpu);
1070 oldval = ctrl->intr_shadow;
1071 newval = val ? 1 : 0;
1072 if (newval != oldval) {
1073 ctrl->intr_shadow = newval;
1074 SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval);
1075 }
1076 return (0);
1077 }
1078
1079 static int
1080 svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val)
1081 {
1082 struct vmcb_ctrl *ctrl;
1083
1084 ctrl = svm_get_vmcb_ctrl(vcpu);
1085 *val = ctrl->intr_shadow;
1086 return (0);
1087 }
1088
1089 /*
1090 * Once an NMI is injected it blocks delivery of further NMIs until the handler
1091 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
1092 * track when the vcpu is done handling the NMI.
1093 */
1094 static int
1095 nmi_blocked(struct svm_vcpu *vcpu)
1096 {
1097 int blocked;
1098
1099 blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1100 return (blocked);
1101 }
1102
1103 static void
1104 enable_nmi_blocking(struct svm_vcpu *vcpu)
1105 {
1106
1107 KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked"));
1108 SVM_CTR0(vcpu, "vNMI blocking enabled");
1109 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1110 }
1111
1112 static void
1113 clear_nmi_blocking(struct svm_vcpu *vcpu)
1114 {
1115 int error __diagused;
1116
1117 KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked"));
1118 SVM_CTR0(vcpu, "vNMI blocking cleared");
1119 /*
1120 * When the IRET intercept is cleared the vcpu will attempt to execute
1121 * the "iret" when it runs next. However, it is possible to inject
1122 * another NMI into the vcpu before the "iret" has actually executed.
1123 *
1124 * For example, if the "iret" encounters a #NPF when accessing the stack
1125 * it will trap back into the hypervisor. If an NMI is pending for
1126 * the vcpu it will be injected into the guest.
1127 *
1128 * XXX this needs to be fixed
1129 */
1130 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1131
1132 /*
1133 * Set 'intr_shadow' to prevent an NMI from being injected on the
1134 * immediate VMRUN.
1135 */
1136 error = svm_modify_intr_shadow(vcpu, 1);
1137 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
1138 }
1139
1140 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL
1141
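/*
 * Emulate a guest write to EFER, enforcing the long-mode consistency checks
 * and injecting #GP for values that set reserved or unsupported bits.
 */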
1142 static int
1143 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
1144 bool *retu)
1145 {
1146 struct vm_exit *vme;
1147 struct vmcb_state *state;
1148 uint64_t changed, lma, oldval;
1149 int error __diagused;
1150
1151 state = svm_get_vmcb_state(vcpu);
1152
1153 oldval = state->efer;
1154 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
1155
1156 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */
1157 changed = oldval ^ newval;
1158
1159 if (newval & EFER_MBZ_BITS)
1160 goto gpf;
1161
1162 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
1163 if (changed & EFER_LME) {
1164 if (state->cr0 & CR0_PG)
1165 goto gpf;
1166 }
1167
1168 /* EFER.LMA = EFER.LME & CR0.PG */
1169 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
1170 lma = EFER_LMA;
1171 else
1172 lma = 0;
1173
1174 if ((newval & EFER_LMA) != lma)
1175 goto gpf;
1176
1177 if (newval & EFER_NXE) {
1178 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE))
1179 goto gpf;
1180 }
1181
1182 /*
1183 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
1184 * this is fixed flag guest attempt to set EFER_LMSLE as an error.
1185 */
1186 if (newval & EFER_LMSLE) {
1187 vme = vm_exitinfo(vcpu->vcpu);
1188 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
1189 *retu = true;
1190 return (0);
1191 }
1192
1193 if (newval & EFER_FFXSR) {
1194 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR))
1195 goto gpf;
1196 }
1197
1198 if (newval & EFER_TCE) {
1199 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE))
1200 goto gpf;
1201 }
1202
1203 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval);
1204 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
1205 return (0);
1206 gpf:
1207 vm_inject_gp(vcpu->vcpu);
1208 return (0);
1209 }
1210
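/* Dispatch a guest WRMSR to the vlapic, the EFER handler or svm_wrmsr(). */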
1211 static int
1212 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
1213 uint64_t val, bool *retu)
1214 {
1215 int error;
1216
1217 if (lapic_msr(num))
1218 error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
1219 else if (num == MSR_EFER)
1220 error = svm_write_efer(sc, vcpu, val, retu);
1221 else
1222 error = svm_wrmsr(vcpu, num, val, retu);
1223
1224 return (error);
1225 }
1226
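/* Emulate RDMSR and, on success, place the result in guest %edx:%eax. */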
1227 static int
1228 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu)
1229 {
1230 struct vmcb_state *state;
1231 struct svm_regctx *ctx;
1232 uint64_t result;
1233 int error;
1234
1235 if (lapic_msr(num))
1236 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
1237 else
1238 error = svm_rdmsr(vcpu, num, &result, retu);
1239
1240 if (error == 0) {
1241 state = svm_get_vmcb_state(vcpu);
1242 ctx = svm_get_guest_regctx(vcpu);
1243 state->rax = result & 0xffffffff;
1244 ctx->sctx_rdx = result >> 32;
1245 }
1246
1247 return (error);
1248 }
1249
1250 #ifdef KTR
1251 static const char *
1252 exit_reason_to_str(uint64_t reason)
1253 {
1254 int i;
1255 static char reasonbuf[32];
1256 static const struct {
1257 int reason;
1258 const char *str;
1259 } reasons[] = {
1260 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" },
1261 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" },
1262 { .reason = VMCB_EXIT_NPF, .str = "nptfault" },
1263 { .reason = VMCB_EXIT_PAUSE, .str = "pause" },
1264 { .reason = VMCB_EXIT_HLT, .str = "hlt" },
1265 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" },
1266 { .reason = VMCB_EXIT_IO, .str = "inout" },
1267 { .reason = VMCB_EXIT_MC, .str = "mchk" },
1268 { .reason = VMCB_EXIT_INTR, .str = "extintr" },
1269 { .reason = VMCB_EXIT_NMI, .str = "nmi" },
1270 { .reason = VMCB_EXIT_VINTR, .str = "vintr" },
1271 { .reason = VMCB_EXIT_MSR, .str = "msr" },
1272 { .reason = VMCB_EXIT_IRET, .str = "iret" },
1273 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" },
1274 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" },
1275 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" },
1276 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" },
1277 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" },
1278 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" },
1279 { .reason = VMCB_EXIT_STGI, .str = "stgi" },
1280 { .reason = VMCB_EXIT_CLGI, .str = "clgi" },
1281 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" },
1282 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" },
1283 { .reason = VMCB_EXIT_INVD, .str = "invd" },
1284 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" },
1285 { .reason = VMCB_EXIT_POPF, .str = "popf" },
1286 { .reason = VMCB_EXIT_PUSHF, .str = "pushf" },
1287 };
1288
1289 for (i = 0; i < nitems(reasons); i++) {
1290 if (reasons[i].reason == reason)
1291 return (reasons[i].str);
1292 }
1293 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
1294 return (reasonbuf);
1295 }
1296 #endif /* KTR */
1297
1298 /*
1299 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
1300 * that are due to instruction intercepts as well as MSR and IOIO intercepts
1301 * and exceptions caused by INT3, INTO and BOUND instructions.
1302 *
1303 * Return 1 if the nRIP is valid and 0 otherwise.
1304 */
1305 static int
1306 nrip_valid(uint64_t exitcode)
1307 {
1308 switch (exitcode) {
1309 case 0x00 ... 0x0F: /* read of CR0 through CR15 */
1310 case 0x10 ... 0x1F: /* write of CR0 through CR15 */
1311 case 0x20 ... 0x2F: /* read of DR0 through DR15 */
1312 case 0x30 ... 0x3F: /* write of DR0 through DR15 */
1313 case 0x43: /* INT3 */
1314 case 0x44: /* INTO */
1315 case 0x45: /* BOUND */
1316 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
1317 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */
1318 return (1);
1319 default:
1320 return (0);
1321 }
1322 }
1323
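/*
 * Handle a #VMEXIT: decode the exit code and either handle it here
 * (returning 1) or fill in 'vmexit' so it can be processed in userland.
 */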
1324 static int
1325 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
1326 struct vm_exit *vmexit)
1327 {
1328 struct vmcb *vmcb;
1329 struct vmcb_state *state;
1330 struct vmcb_ctrl *ctrl;
1331 struct svm_regctx *ctx;
1332 uint64_t code, info1, info2, val;
1333 uint32_t eax, ecx, edx;
1334 int error __diagused, errcode_valid, handled, idtvec, reflect;
1335 bool retu;
1336
1337 ctx = svm_get_guest_regctx(vcpu);
1338 vmcb = svm_get_vmcb(vcpu);
1339 state = &vmcb->state;
1340 ctrl = &vmcb->ctrl;
1341
1342 handled = 0;
1343 code = ctrl->exitcode;
1344 info1 = ctrl->exitinfo1;
1345 info2 = ctrl->exitinfo2;
1346
1347 vmexit->exitcode = VM_EXITCODE_BOGUS;
1348 vmexit->rip = state->rip;
1349 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
1350
1351 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
1352
1353 /*
1354 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
1355 * in an inconsistent state and can trigger assertions that would
1356 * never happen otherwise.
1357 */
1358 if (code == VMCB_EXIT_INVALID) {
1359 vm_exit_svm(vmexit, code, info1, info2);
1360 return (0);
1361 }
1362
1363 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
1364 "injection valid bit is set %#lx", __func__, ctrl->eventinj));
1365
1366 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
1367 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
1368 vmexit->inst_length, code, info1, info2));
1369
1370 svm_update_virqinfo(vcpu);
1371 svm_save_intinfo(svm_sc, vcpu);
1372
1373 switch (code) {
1374 case VMCB_EXIT_IRET:
1375 /*
1376 * Restart execution at "iret" but with the intercept cleared.
1377 */
1378 vmexit->inst_length = 0;
1379 clear_nmi_blocking(vcpu);
1380 handled = 1;
1381 break;
1382 case VMCB_EXIT_VINTR: /* interrupt window exiting */
1383 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1);
1384 handled = 1;
1385 break;
1386 case VMCB_EXIT_INTR: /* external interrupt */
1387 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
1388 handled = 1;
1389 break;
1390 case VMCB_EXIT_NMI: /* external NMI */
1391 handled = 1;
1392 break;
1393 case 0x40 ... 0x5F:
1394 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
1395 reflect = 1;
1396 idtvec = code - 0x40;
1397 switch (idtvec) {
1398 case IDT_MC:
1399 /*
1400 * Call the machine check handler by hand. Also don't
1401 * reflect the machine check back into the guest.
1402 */
1403 reflect = 0;
1404 SVM_CTR0(vcpu, "Vectoring to MCE handler");
1405 __asm __volatile("int $18");
1406 break;
1407 case IDT_PF:
1408 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2);
1409 KASSERT(error == 0, ("%s: error %d updating cr2",
1410 __func__, error));
1411 /* fallthru */
1412 case IDT_NP:
1413 case IDT_SS:
1414 case IDT_GP:
1415 case IDT_AC:
1416 case IDT_TS:
1417 errcode_valid = 1;
1418 break;
1419
1420 case IDT_DF:
1421 errcode_valid = 1;
1422 info1 = 0;
1423 break;
1424 case IDT_DB: {
1425 /*
1426 * Check if we are being stepped (RFLAGS.TF)
1427 * and bounce vmexit to userland.
1428 */
1429 bool stepped = false;
1430 uint64_t dr6 = 0;
1431
1432 svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6);
1433 stepped = !!(dr6 & DBREG_DR6_BS);
1434 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) {
1435 vmexit->exitcode = VM_EXITCODE_DB;
1436 vmexit->u.dbg.trace_trap = 1;
1437 vmexit->u.dbg.pushf_intercept = 0;
1438
1439 if (vcpu->dbg.popf_sstep) {
1440 /*
1441 * DB# exit was caused by stepping over
1442 * popf.
1443 */
1444 uint64_t rflags;
1445
1446 vcpu->dbg.popf_sstep = 0;
1447
1448 /*
1449 * Update shadowed TF bit so the next
1450 * setcap(..., RFLAGS_SSTEP, 0) restores
1451 * the correct value
1452 */
1453 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS,
1454 &rflags);
1455 vcpu->dbg.rflags_tf = rflags & PSL_T;
1456 } else if (vcpu->dbg.pushf_sstep) {
1457 /*
1458 * DB# exit was caused by stepping over
1459 * pushf.
1460 */
1461 vcpu->dbg.pushf_sstep = 0;
1462
1463 /*
1464 * Adjusting the pushed rflags after a
1465 * restarted pushf instruction must be
1466 * handled outside of svm.c due to the
1467 * critical_enter() lock being held.
1468 */
1469 vmexit->u.dbg.pushf_intercept = 1;
1470 vmexit->u.dbg.tf_shadow_val =
1471 vcpu->dbg.rflags_tf;
1472 svm_paging_info(svm_get_vmcb(vcpu),
1473 &vmexit->u.dbg.paging);
1474 }
1475
1476 /* Clear DR6 "single-step" bit. */
1477 dr6 &= ~DBREG_DR6_BS;
1478 error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6);
1479 KASSERT(error == 0,
1480 ("%s: error %d updating DR6\r\n", __func__,
1481 error));
1482
1483 reflect = 0;
1484 }
1485 break;
1486 }
1487 case IDT_BP:
1488 vmexit->exitcode = VM_EXITCODE_BPT;
1489 vmexit->u.bpt.inst_length = vmexit->inst_length;
1490 vmexit->inst_length = 0;
1491
1492 reflect = 0;
1493 break;
1494 case IDT_OF:
1495 case IDT_BR:
1496 /*
1497 * The 'nrip' field is populated for INT3, INTO and
1498 * BOUND exceptions and this also implies that
1499 * 'inst_length' is non-zero.
1500 *
1501 * Reset 'inst_length' to zero so the guest %rip at
1502 * event injection is identical to what it was when
1503 * the exception originally happened.
1504 */
1505 SVM_CTR2(vcpu, "Reset inst_length from %d "
1506 "to zero before injecting exception %d",
1507 vmexit->inst_length, idtvec);
1508 vmexit->inst_length = 0;
1509 /* fallthru */
1510 default:
1511 errcode_valid = 0;
1512 info1 = 0;
1513 break;
1514 }
1515
1516 if (reflect) {
1517 KASSERT(vmexit->inst_length == 0,
1518 ("invalid inst_length (%d) "
1519 "when reflecting exception %d into guest",
1520 vmexit->inst_length, idtvec));
1521 /* Reflect the exception back into the guest */
1522 SVM_CTR2(vcpu, "Reflecting exception "
1523 "%d/%#x into the guest", idtvec, (int)info1);
1524 error = vm_inject_exception(vcpu->vcpu, idtvec,
1525 errcode_valid, info1, 0);
1526 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
1527 __func__, error));
1528 handled = 1;
1529 }
1530 break;
1531 case VMCB_EXIT_MSR: /* MSR access. */
1532 eax = state->rax;
1533 ecx = ctx->sctx_rcx;
1534 edx = ctx->sctx_rdx;
1535 retu = false;
1536
1537 if (info1) {
1538 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
1539 val = (uint64_t)edx << 32 | eax;
1540 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val);
1541 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
1542 vmexit->exitcode = VM_EXITCODE_WRMSR;
1543 vmexit->u.msr.code = ecx;
1544 vmexit->u.msr.wval = val;
1545 } else if (!retu) {
1546 handled = 1;
1547 } else {
1548 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1549 ("emulate_wrmsr retu with bogus exitcode"));
1550 }
1551 } else {
1552 SVM_CTR1(vcpu, "rdmsr %#x", ecx);
1553 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
1554 if (emulate_rdmsr(vcpu, ecx, &retu)) {
1555 vmexit->exitcode = VM_EXITCODE_RDMSR;
1556 vmexit->u.msr.code = ecx;
1557 } else if (!retu) {
1558 handled = 1;
1559 } else {
1560 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1561 ("emulate_rdmsr retu with bogus exitcode"));
1562 }
1563 }
1564 break;
1565 case VMCB_EXIT_IO:
1566 handled = svm_handle_io(vcpu, vmexit);
1567 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
1568 break;
1569 case VMCB_EXIT_CPUID:
1570 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
1571 handled = x86_emulate_cpuid(vcpu->vcpu,
1572 &state->rax, &ctx->sctx_rbx, &ctx->sctx_rcx,
1573 &ctx->sctx_rdx);
1574 break;
1575 case VMCB_EXIT_HLT:
1576 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
1577 vmexit->exitcode = VM_EXITCODE_HLT;
1578 vmexit->u.hlt.rflags = state->rflags;
1579 break;
1580 case VMCB_EXIT_PAUSE:
1581 vmexit->exitcode = VM_EXITCODE_PAUSE;
1582 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
1583 break;
1584 case VMCB_EXIT_NPF:
1585 /* EXITINFO2 contains the faulting guest physical address */
1586 if (info1 & VMCB_NPF_INFO1_RSV) {
1587 SVM_CTR2(vcpu, "nested page fault with "
1588 "reserved bits set: info1(%#lx) info2(%#lx)",
1589 info1, info2);
1590 } else if (vm_mem_allocated(vcpu->vcpu, info2)) {
1591 vmexit->exitcode = VM_EXITCODE_PAGING;
1592 vmexit->u.paging.gpa = info2;
1593 vmexit->u.paging.fault_type = npf_fault_type(info1);
1594 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
1595 SVM_CTR3(vcpu, "nested page fault "
1596 "on gpa %#lx/%#lx at rip %#lx",
1597 info2, info1, state->rip);
1598 } else if (svm_npf_emul_fault(info1)) {
1599 svm_handle_inst_emul(vmcb, info2, vmexit);
1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
1601 SVM_CTR3(vcpu, "inst_emul fault "
1602 "for gpa %#lx/%#lx at rip %#lx",
1603 info2, info1, state->rip);
1604 }
1605 break;
1606 case VMCB_EXIT_MONITOR:
1607 vmexit->exitcode = VM_EXITCODE_MONITOR;
1608 break;
1609 case VMCB_EXIT_MWAIT:
1610 vmexit->exitcode = VM_EXITCODE_MWAIT;
1611 break;
1612 case VMCB_EXIT_PUSHF: {
1613 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
1614 uint64_t rflags;
1615
1616 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1617 /* Restart this instruction. */
1618 vmexit->inst_length = 0;
1619 /* Disable PUSHF intercepts - avoid a loop. */
1620 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
1621 VMCB_INTCPT_PUSHF, 0);
1622 /* Trace restarted instruction. */
1623 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T));
1624 /* Let the IDT_DB handler know that pushf was stepped.
1625 */
1626 vcpu->dbg.pushf_sstep = 1;
1627 handled = 1;
1628 }
1629 break;
1630 }
1631 case VMCB_EXIT_POPF: {
1632 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
1633 uint64_t rflags;
1634
1635 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1636 /* Restart this instruction */
1637 vmexit->inst_length = 0;
1638 /* Disable POPF intercepts - avoid a loop*/
1639 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
1640 VMCB_INTCPT_POPF, 0);
1641 /* Trace restarted instruction */
1642 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T));
1643 vcpu->dbg.popf_sstep = 1;
1644 handled = 1;
1645 }
1646 break;
1647 }
1648 case VMCB_EXIT_SHUTDOWN:
1649 case VMCB_EXIT_VMRUN:
1650 case VMCB_EXIT_VMMCALL:
1651 case VMCB_EXIT_VMLOAD:
1652 case VMCB_EXIT_VMSAVE:
1653 case VMCB_EXIT_STGI:
1654 case VMCB_EXIT_CLGI:
1655 case VMCB_EXIT_SKINIT:
1656 case VMCB_EXIT_ICEBP:
1657 case VMCB_EXIT_INVLPGA:
1658 vm_inject_ud(vcpu->vcpu);
1659 handled = 1;
1660 break;
1661 case VMCB_EXIT_INVD:
1662 case VMCB_EXIT_WBINVD:
1663 /* ignore exit */
1664 handled = 1;
1665 break;
1666 default:
1667 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
1668 break;
1669 }
1670
1671 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d",
1672 handled ? "handled" : "unhandled", exit_reason_to_str(code),
1673 vmexit->rip, vmexit->inst_length);
1674
1675 if (handled) {
1676 vmexit->rip += vmexit->inst_length;
1677 vmexit->inst_length = 0;
1678 state->rip = vmexit->rip;
1679 } else {
1680 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1681 /*
1682 * If this VM exit was not claimed by anybody then
1683 * treat it as a generic SVM exit.
1684 */
1685 vm_exit_svm(vmexit, code, info1, info2);
1686 } else {
1687 /*
1688 * The exitcode and collateral have been populated.
1689 * The VM exit will be processed further in userland.
1690 */
1691 }
1692 }
1693 return (handled);
1694 }
1695
1696 static void
1697 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
1698 {
1699 uint64_t intinfo;
1700
1701 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo))
1702 return;
1703
1704 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
1705 "valid: %#lx", __func__, intinfo));
1706
1707 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
1708 VMCB_EXITINTINFO_VECTOR(intinfo),
1709 VMCB_EXITINTINFO_EC(intinfo),
1710 VMCB_EXITINTINFO_EC_VALID(intinfo));
1711 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1);
1712 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo);
1713 }
1714
1715 /*
1716 * Inject event to virtual cpu.
1717 */
1718 static void
1719 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
1720 struct vlapic *vlapic)
1721 {
1722 struct vmcb_ctrl *ctrl;
1723 struct vmcb_state *state;
1724 uint8_t v_tpr;
1725 int vector, need_intr_window;
1726 int extint_pending;
1727
1728 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) {
1729 return;
1730 }
1731
1732 state = svm_get_vmcb_state(vcpu);
1733 ctrl = svm_get_vmcb_ctrl(vcpu);
1734
1735 need_intr_window = 0;
1736
1737 if (vcpu->nextrip != state->rip) {
1738 ctrl->intr_shadow = 0;
1739 SVM_CTR2(vcpu, "Guest interrupt blocking "
1740 "cleared due to rip change: %#lx/%#lx",
1741 vcpu->nextrip, state->rip);
1742 }
1743
1744 /*
1745 * Inject pending events or exceptions for this vcpu.
1746 *
1747 * An event might be pending because the previous #VMEXIT happened
1748 * during event delivery (i.e. ctrl->exitintinfo).
1749 *
1750 * An event might also be pending because an exception was injected
1751 * by the hypervisor (e.g. #PF during instruction emulation).
1752 */
1753 svm_inj_intinfo(sc, vcpu);
1754
1755 /* NMI event has priority over interrupts. */
1756 if (vm_nmi_pending(vcpu->vcpu)) {
1757 if (nmi_blocked(vcpu)) {
1758 /*
1759 * Can't inject another NMI if the guest has not
1760 * yet executed an "iret" after the last NMI.
1761 */
1762 SVM_CTR0(vcpu, "Cannot inject NMI due "
1763 "to NMI-blocking");
1764 } else if (ctrl->intr_shadow) {
1765 /*
1766 * Can't inject an NMI if the vcpu is in an intr_shadow.
1767 */
1768 SVM_CTR0(vcpu, "Cannot inject NMI due to "
1769 "interrupt shadow");
1770 need_intr_window = 1;
1771 goto done;
1772 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1773 /*
1774 * If there is already an exception/interrupt pending
1775 * then defer the NMI until after that.
1776 */
1777 SVM_CTR1(vcpu, "Cannot inject NMI due to "
1778 "eventinj %#lx", ctrl->eventinj);
1779
1780 /*
1781 * Use self-IPI to trigger a VM-exit as soon as
1782 * possible after the event injection is completed.
1783 *
1784 * This works only if the external interrupt exiting
1785 * is at a lower priority than the event injection.
1786 *
1787 * Although not explicitly specified in APMv2 the
1788 * relative priorities were verified empirically.
1789 */
1790 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */
1791 } else {
1792 vm_nmi_clear(vcpu->vcpu);
1793
1794 /* Inject NMI, vector number is not used */
1795 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI,
1796 IDT_NMI, 0, false);
1797
1798 /* virtual NMI blocking is now in effect */
1799 enable_nmi_blocking(vcpu);
1800
1801 SVM_CTR0(vcpu, "Injecting vNMI");
1802 }
1803 }
1804
1805 extint_pending = vm_extint_pending(vcpu->vcpu);
1806 if (!extint_pending) {
1807 if (!vlapic_pending_intr(vlapic, &vector))
1808 goto done;
1809 KASSERT(vector >= 16 && vector <= 255,
1810 ("invalid vector %d from local APIC", vector));
1811 } else {
1812 /* Ask the legacy pic for a vector to inject */
1813 vatpic_pending_intr(sc->vm, &vector);
1814 KASSERT(vector >= 0 && vector <= 255,
1815 ("invalid vector %d from INTR", vector));
1816 }
1817
1818 /*
1819 * If the guest has disabled interrupts or is in an interrupt shadow
1820 * then we cannot inject the pending interrupt.
1821 */
1822 if ((state->rflags & PSL_I) == 0) {
1823 SVM_CTR2(vcpu, "Cannot inject vector %d due to "
1824 "rflags %#lx", vector, state->rflags);
1825 need_intr_window = 1;
1826 goto done;
1827 }
1828
1829 if (ctrl->intr_shadow) {
1830 SVM_CTR1(vcpu, "Cannot inject vector %d due to "
1831 "interrupt shadow", vector);
1832 need_intr_window = 1;
1833 goto done;
1834 }
1835
1836 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1837 SVM_CTR2(vcpu, "Cannot inject vector %d due to "
1838 "eventinj %#lx", vector, ctrl->eventinj);
1839 need_intr_window = 1;
1840 goto done;
1841 }
1842
1843 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
1844
1845 if (!extint_pending) {
1846 vlapic_intr_accepted(vlapic, vector);
1847 } else {
1848 vm_extint_clear(vcpu->vcpu);
1849 vatpic_intr_accepted(sc->vm, vector);
1850 }
1851
1852 /*
1853 * Force a VM-exit as soon as the vcpu is ready to accept another
1854 * interrupt. This is done because the PIC might have another vector
1855 * that it wants to inject. Also, if the APIC has a pending interrupt
1856 * that was preempted by the ExtInt then it allows us to inject the
1857 * APIC vector as soon as possible.
1858 */
1859 need_intr_window = 1;
1860 done:
1861 /*
1862 * The guest can modify the TPR by writing to %CR8. In guest mode
1863 * the processor reflects this write to V_TPR without hypervisor
1864 * intervention.
1865 *
1866 * The guest can also modify the TPR by writing to it via the memory
1867 * mapped APIC page. In this case, the write will be emulated by the
1868 * hypervisor. For this reason V_TPR must be updated before every
1869 * VMRUN.
1870 */
1871 v_tpr = vlapic_get_cr8(vlapic);
1872 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
1873 if (ctrl->v_tpr != v_tpr) {
1874 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x",
1875 ctrl->v_tpr, v_tpr);
1876 ctrl->v_tpr = v_tpr;
1877 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1878 }
1879
1880 if (need_intr_window) {
1881 /*
1882 * We use V_IRQ in conjunction with the VINTR intercept to
1883 * trap into the hypervisor as soon as a virtual interrupt
1884 * can be delivered.
1885 *
1886 * Since injected events are not subject to intercept checks
1887 * we need to ensure that the V_IRQ is not actually going to
1888 * be delivered on VM entry. The KASSERT below enforces this.
1889 */
1890 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
1891 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
1892 ("Bogus intr_window_exiting: eventinj (%#lx), "
1893 "intr_shadow (%u), rflags (%#lx)",
1894 ctrl->eventinj, ctrl->intr_shadow, state->rflags));
1895 enable_intr_window_exiting(vcpu);
1896 } else {
1897 disable_intr_window_exiting(vcpu);
1898 }
1899 }
1900
1901 static __inline void
1902 restore_host_tss(void)
1903 {
1904 struct system_segment_descriptor *tss_sd;
1905
1906 /*
1907 * The TSS descriptor was in use prior to launching the guest so it
1908 * has been marked busy.
1909 *
1910 * 'ltr' requires the descriptor to be marked available so change the
1911 * type to "64-bit available TSS".
1912 */
1913 tss_sd = PCPU_GET(tss);
1914 tss_sd->sd_type = SDT_SYSTSS;
1915 ltr(GSEL(GPROC0_SEL, SEL_KPL));
1916 }
1917
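/*
 * Activate the nested page table pmap on the current cpu and ensure the
 * vcpu runs with a usable ASID: reuse the current one, request a TLB
 * flush, or allocate a new ASID as described in the table below.
 */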
1918 static void
1919 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap)
1920 {
1921 struct vmcb_ctrl *ctrl;
1922 long eptgen;
1923 int cpu;
1924 bool alloc_asid;
1925
1926 cpu = curcpu;
1927 CPU_SET_ATOMIC(cpu, &pmap->pm_active);
1928 smr_enter(pmap->pm_eptsmr);
1929
1930 ctrl = svm_get_vmcb_ctrl(vcpu);
1931
1932 /*
1933 * The TLB entries associated with the vcpu's ASID are not valid
1934 * if either of the following conditions is true:
1935 *
1936 * 1. The vcpu's ASID generation is different than the host cpu's
1937 * ASID generation. This happens when the vcpu migrates to a new
1938 * host cpu. It can also happen when the number of vcpus executing
1939 * on a host cpu is greater than the number of ASIDs available.
1940 *
1941 * 2. The pmap generation number is different than the value cached in
1942 * the 'vcpustate'. This happens when the host invalidates pages
1943 * belonging to the guest.
1944 *
1945 * asidgen eptgen Action
1946 * mismatch mismatch
1947 * 0 0 (a)
1948 * 0 1 (b1) or (b2)
1949 * 1 0 (c)
1950 * 1 1 (d)
1951 *
1952 * (a) There is no mismatch in eptgen or ASID generation and therefore
1953 * no further action is needed.
1954 *
1955 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
1956 * retained and the TLB entries associated with this ASID
1957 * are flushed by VMRUN.
1958 *
1959 * (b2) If the cpu does not support FlushByAsid then a new ASID is
1960 * allocated.
1961 *
1962 * (c) A new ASID is allocated.
1963 *
1964 * (d) A new ASID is allocated.
1965 */
1966
1967 alloc_asid = false;
1968 eptgen = atomic_load_long(&pmap->pm_eptgen);
1969 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;
1970
1971 if (vcpu->asid.gen != asid[cpu].gen) {
1972 alloc_asid = true; /* (c) and (d) */
1973 } else if (vcpu->eptgen != eptgen) {
1974 if (flush_by_asid())
1975 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */
1976 else
1977 alloc_asid = true; /* (b2) */
1978 } else {
1979 /*
1980 * This is the common case (a).
1981 */
1982 KASSERT(!alloc_asid, ("ASID allocation not necessary"));
1983 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
1984 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
1985 }
1986
1987 if (alloc_asid) {
1988 if (++asid[cpu].num >= nasid) {
1989 asid[cpu].num = 1;
1990 if (++asid[cpu].gen == 0)
1991 asid[cpu].gen = 1;
1992 /*
1993 * If this cpu does not support "flush-by-asid"
1994 * then flush the entire TLB on a generation
1995 * bump. Subsequent ASID allocation in this
1996 * generation can be done without a TLB flush.
1997 */
1998 if (!flush_by_asid())
1999 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
2000 }
2001 vcpu->asid.gen = asid[cpu].gen;
2002 vcpu->asid.num = asid[cpu].num;
2003
2004 ctrl->asid = vcpu->asid.num;
2005 svm_set_dirty(vcpu, VMCB_CACHE_ASID);
2006 /*
2007 * If this cpu supports "flush-by-asid" then the TLB
2008 * was not flushed after the generation bump. The TLB
2009 * is flushed selectively after every new ASID allocation.
2010 */
2011 if (flush_by_asid())
2012 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
2013 }
2014 vcpu->eptgen = eptgen;
2015
2016 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
2017 KASSERT(ctrl->asid == vcpu->asid.num,
2018 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num));
2019 }
2020
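/* Undo svm_pmap_activate(): leave the SMR read section and clear pm_active. */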
2021 static void
2022 svm_pmap_deactivate(pmap_t pmap)
2023 {
2024 smr_exit(pmap->pm_eptsmr);
2025 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
2026 }
2027
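/*
 * CLGI and STGI toggle the global interrupt flag (GIF). While GIF is clear,
 * physical interrupts, NMIs and SMIs are held pending, which keeps the
 * world-switch sequence in svm_run() atomic.
 */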
2028 static __inline void
2029 disable_gintr(void)
2030 {
2031
2032 __asm __volatile("clgi");
2033 }
2034
2035 static __inline void
2036 enable_gintr(void)
2037 {
2038
2039 __asm __volatile("stgi");
2040 }
2041
2042 static __inline void
2043 svm_dr_enter_guest(struct svm_regctx *gctx)
2044 {
2045
2046 /* Save host control debug registers. */
2047 gctx->host_dr7 = rdr7();
2048 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
2049
2050 /*
2051 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
2052 * exceptions in the host based on the guest DRx values. The
2053 * guest DR6, DR7, and DEBUGCTL are saved/restored in the
2054 * VMCB.
2055 */
2056 load_dr7(0);
2057 wrmsr(MSR_DEBUGCTLMSR, 0);
2058
2059 /* Save host debug registers. */
2060 gctx->host_dr0 = rdr0();
2061 gctx->host_dr1 = rdr1();
2062 gctx->host_dr2 = rdr2();
2063 gctx->host_dr3 = rdr3();
2064 gctx->host_dr6 = rdr6();
2065
2066 /* Restore guest debug registers. */
2067 load_dr0(gctx->sctx_dr0);
2068 load_dr1(gctx->sctx_dr1);
2069 load_dr2(gctx->sctx_dr2);
2070 load_dr3(gctx->sctx_dr3);
2071 }
2072
2073 static __inline void
2074 svm_dr_leave_guest(struct svm_regctx *gctx)
2075 {
2076
2077 /* Save guest debug registers. */
2078 gctx->sctx_dr0 = rdr0();
2079 gctx->sctx_dr1 = rdr1();
2080 gctx->sctx_dr2 = rdr2();
2081 gctx->sctx_dr3 = rdr3();
2082
2083 /*
2084 * Restore host debug registers. Restore DR7 and DEBUGCTL
2085 * last.
2086 */
2087 load_dr0(gctx->host_dr0);
2088 load_dr1(gctx->host_dr1);
2089 load_dr2(gctx->host_dr2);
2090 load_dr3(gctx->host_dr3);
2091 load_dr6(gctx->host_dr6);
2092 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
2093 load_dr7(gctx->host_dr7);
2094 }
2095
2096 /*
2097 * Start vcpu with specified RIP.
2098 */
2099 static int
2100 svm_run(void *vcpui, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo)
2101 {
2102 struct svm_regctx *gctx;
2103 struct svm_softc *svm_sc;
2104 struct svm_vcpu *vcpu;
2105 struct vmcb_state *state;
2106 struct vmcb_ctrl *ctrl;
2107 struct vm_exit *vmexit;
2108 struct vlapic *vlapic;
2109 uint64_t vmcb_pa;
2110 int handled;
2111 uint16_t ldt_sel;
2112
2113 vcpu = vcpui;
2114 svm_sc = vcpu->sc;
2115 state = svm_get_vmcb_state(vcpu);
2116 ctrl = svm_get_vmcb_ctrl(vcpu);
2117 vmexit = vm_exitinfo(vcpu->vcpu);
2118 vlapic = vm_lapic(vcpu->vcpu);
2119
2120 gctx = svm_get_guest_regctx(vcpu);
2121 vmcb_pa = vcpu->vmcb_pa;
2122
2123 if (vcpu->lastcpu != curcpu) {
2124 /*
2125 * Force new ASID allocation by invalidating the generation.
2126 */
2127 vcpu->asid.gen = 0;
2128
2129 /*
2130 * Invalidate the VMCB state cache by marking all fields dirty.
2131 */
2132 svm_set_dirty(vcpu, 0xffffffff);
2133
2134 /*
2135 * XXX
2136 * Setting 'vcpu->lastcpu' here is a bit premature because
2137 * we may return from this function without actually executing
2138 * the VMRUN instruction. This could happen if a rendezvous
2139 * or an AST is pending on the first time through the loop.
2140 *
2141 * This works for now but any new side-effects of vcpu
2142 * migration should take this case into account.
2143 */
2144 vcpu->lastcpu = curcpu;
2145 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
2146 }
2147
2148 svm_msr_guest_enter(vcpu);
2149
2150 /* Update Guest RIP */
2151 state->rip = rip;
2152
2153 do {
2154 /*
2155 * Disable global interrupts to guarantee atomicity during
2156 * loading of guest state. This includes not only the state
2157 * loaded by the "vmrun" instruction but also software state
2158 * maintained by the hypervisor: suspended and rendezvous
2159 * state, NPT generation number, vlapic interrupts etc.
2160 */
2161 disable_gintr();
2162
2163 if (vcpu_suspended(evinfo)) {
2164 enable_gintr();
2165 vm_exit_suspended(vcpu->vcpu, state->rip);
2166 break;
2167 }
2168
2169 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
2170 enable_gintr();
2171 vm_exit_rendezvous(vcpu->vcpu, state->rip);
2172 break;
2173 }
2174
2175 if (vcpu_reqidle(evinfo)) {
2176 enable_gintr();
2177 vm_exit_reqidle(vcpu->vcpu, state->rip);
2178 break;
2179 }
2180
2181 /* The scheduler has asked us to yield the cpu. */
2182 if (vcpu_should_yield(vcpu->vcpu)) {
2183 enable_gintr();
2184 vm_exit_astpending(vcpu->vcpu, state->rip);
2185 break;
2186 }
2187
2188 if (vcpu_debugged(vcpu->vcpu)) {
2189 enable_gintr();
2190 vm_exit_debug(vcpu->vcpu, state->rip);
2191 break;
2192 }
2193
2194 /*
2195 * #VMEXIT resumes the host with the guest LDTR, so
2196 * save the current LDT selector so it can be restored
2197 * after an exit. The userspace hypervisor probably
2198 * doesn't use an LDT, but save and restore it to be
2199 * safe.
2200 */
2201 ldt_sel = sldt();
2202
2203 svm_inj_interrupts(svm_sc, vcpu, vlapic);
2204
2205 /*
2206 * Check the pmap generation and the ASID generation to
2207 * ensure that the vcpu does not use stale TLB mappings.
2208 */
2209 svm_pmap_activate(vcpu, pmap);
2210
2211 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
2212 vcpu->dirty = 0;
2213 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
2214
2215 /* Launch Virtual Machine. */
2216 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip);
2217 svm_dr_enter_guest(gctx);
2218 svm_launch(vmcb_pa, gctx, get_pcpu());
2219 svm_dr_leave_guest(gctx);
2220
2221 svm_pmap_deactivate(pmap);
2222
2223 /*
2224 * The host GDTR and IDTR are saved by VMRUN and restored
2225 * automatically on #VMEXIT. However, the host TSS needs
2226 * to be restored explicitly.
2227 */
2228 restore_host_tss();
2229
2230 /* Restore host LDTR. */
2231 lldt(ldt_sel);
2232
2233 /* #VMEXIT disables interrupts so re-enable them here. */
2234 enable_gintr();
2235
2236 /* Update 'nextrip' */
2237 vcpu->nextrip = state->rip;
2238
2239 /* Handle #VMEXIT and if required return to user space. */
2240 handled = svm_vmexit(svm_sc, vcpu, vmexit);
2241 } while (handled);
2242
2243 svm_msr_guest_exit(vcpu);
2244
2245 return (0);
2246 }
2247
2248 static void
2249 svm_vcpu_cleanup(void *vcpui)
2250 {
2251 struct svm_vcpu *vcpu = vcpui;
2252
2253 free(vcpu->vmcb, M_SVM);
2254 free(vcpu, M_SVM);
2255 }
2256
2257 static void
2258 svm_cleanup(void *vmi)
2259 {
2260 struct svm_softc *sc = vmi;
2261
2262 contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
2263 contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
2264 free(sc, M_SVM);
2265 }
2266
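/*
 * Map a VM_REG_GUEST_* identifier to its slot in the software-maintained
 * register context. Registers that live in the VMCB are not handled here
 * and result in a NULL return.
 */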
2267 static register_t *
2268 swctx_regptr(struct svm_regctx *regctx, int reg)
2269 {
2270
2271 switch (reg) {
2272 case VM_REG_GUEST_RBX:
2273 return (&regctx->sctx_rbx);
2274 case VM_REG_GUEST_RCX:
2275 return (&regctx->sctx_rcx);
2276 case VM_REG_GUEST_RDX:
2277 return (&regctx->sctx_rdx);
2278 case VM_REG_GUEST_RDI:
2279 return (&regctx->sctx_rdi);
2280 case VM_REG_GUEST_RSI:
2281 return (&regctx->sctx_rsi);
2282 case VM_REG_GUEST_RBP:
2283 return (&regctx->sctx_rbp);
2284 case VM_REG_GUEST_R8:
2285 return (&regctx->sctx_r8);
2286 case VM_REG_GUEST_R9:
2287 return (&regctx->sctx_r9);
2288 case VM_REG_GUEST_R10:
2289 return (&regctx->sctx_r10);
2290 case VM_REG_GUEST_R11:
2291 return (&regctx->sctx_r11);
2292 case VM_REG_GUEST_R12:
2293 return (&regctx->sctx_r12);
2294 case VM_REG_GUEST_R13:
2295 return (&regctx->sctx_r13);
2296 case VM_REG_GUEST_R14:
2297 return (&regctx->sctx_r14);
2298 case VM_REG_GUEST_R15:
2299 return (&regctx->sctx_r15);
2300 case VM_REG_GUEST_DR0:
2301 return (&regctx->sctx_dr0);
2302 case VM_REG_GUEST_DR1:
2303 return (&regctx->sctx_dr1);
2304 case VM_REG_GUEST_DR2:
2305 return (&regctx->sctx_dr2);
2306 case VM_REG_GUEST_DR3:
2307 return (&regctx->sctx_dr3);
2308 default:
2309 return (NULL);
2310 }
2311 }
2312
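/*
 * Read a guest register. The interrupt shadow pseudo-register comes from
 * the VMCB control area, VMCB-resident registers go through vmcb_read(),
 * and the remaining registers are taken from the software register context.
 */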
2313 static int
2314 svm_getreg(void *vcpui, int ident, uint64_t *val)
2315 {
2316 struct svm_vcpu *vcpu;
2317 register_t *reg;
2318
2319 vcpu = vcpui;
2320
2321 if (ident == VM_REG_GUEST_INTR_SHADOW) {
2322 return (svm_get_intr_shadow(vcpu, val));
2323 }
2324
2325 if (vmcb_read(vcpu, ident, val) == 0) {
2326 return (0);
2327 }
2328
2329 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2330
2331 if (reg != NULL) {
2332 *val = *reg;
2333 return (0);
2334 }
2335
2336 SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident);
2337 return (EINVAL);
2338 }
2339
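/*
 * Write a guest register using the same lookup order as svm_getreg().
 * Writes that name a raw VMCB offset (VMCB_ACCESS encodings) are not
 * permitted from user requests.
 */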
2340 static int
2341 svm_setreg(void *vcpui, int ident, uint64_t val)
2342 {
2343 struct svm_vcpu *vcpu;
2344 register_t *reg;
2345
2346 vcpu = vcpui;
2347
2348 if (ident == VM_REG_GUEST_INTR_SHADOW) {
2349 return (svm_modify_intr_shadow(vcpu, val));
2350 }
2351
2352 /* Do not permit user write access to VMCB fields by offset. */
2353 if (!VMCB_ACCESS_OK(ident)) {
2354 if (vmcb_write(vcpu, ident, val) == 0) {
2355 return (0);
2356 }
2357 }
2358
2359 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2360
2361 if (reg != NULL) {
2362 *reg = val;
2363 return (0);
2364 }
2365
2366 if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) {
2367 /* Ignore. */
2368 return (0);
2369 }
2370
2371 /*
2372 * XXX deal with CR3 and invalidate TLB entries tagged with the
2373 * vcpu's ASID. This needs to be treated differently depending on
2374 * whether 'running' is true/false.
2375 */
2376
2377 SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident);
2378 return (EINVAL);
2379 }
2380
2381 static int
2382 svm_getdesc(void *vcpui, int reg, struct seg_desc *desc)
2383 {
2384 return (vmcb_getdesc(vcpui, reg, desc));
2385 }
2386
2387 static int
2388 svm_setdesc(void *vcpui, int reg, struct seg_desc *desc)
2389 {
2390 return (vmcb_setdesc(vcpui, reg, desc));
2391 }
2392
2393 #ifdef BHYVE_SNAPSHOT
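/*
 * Save or restore a single guest register through the snapshot stream,
 * depending on the operation recorded in 'meta'.
 */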
2394 static int
2395 svm_snapshot_reg(void *vcpui, int ident, struct vm_snapshot_meta *meta)
2396 {
2397 int ret;
2398 uint64_t val;
2399
2400 if (meta->op == VM_SNAPSHOT_SAVE) {
2401 ret = svm_getreg(vcpui, ident, &val);
2402 if (ret != 0)
2403 goto done;
2404
2405 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
2406 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
2407 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
2408
2409 ret = svm_setreg(vcpui, ident, val);
2410 if (ret != 0)
2411 goto done;
2412 } else {
2413 ret = EINVAL;
2414 goto done;
2415 }
2416
2417 done:
2418 return (ret);
2419 }
2420 #endif
2421
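/*
 * Enable or disable an optional vcpu capability, typically by toggling the
 * corresponding VMCB intercept (HLT, PAUSE, #BP, ...). VM_CAP_RFLAGS_TF
 * additionally shadows the guest's trap flag while single-stepping.
 */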
2422 static int
2423 svm_setcap(void *vcpui, int type, int val)
2424 {
2425 struct svm_vcpu *vcpu;
2426 struct vlapic *vlapic;
2427 int error;
2428
2429 vcpu = vcpui;
2430 error = 0;
2431
2432 switch (type) {
2433 case VM_CAP_HALT_EXIT:
2434 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
2435 VMCB_INTCPT_HLT, val);
2436 break;
2437 case VM_CAP_PAUSE_EXIT:
2438 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
2439 VMCB_INTCPT_PAUSE, val);
2440 break;
2441 case VM_CAP_UNRESTRICTED_GUEST:
2442 /* Unrestricted guest execution cannot be disabled in SVM */
2443 if (val == 0)
2444 error = EINVAL;
2445 break;
2446 case VM_CAP_BPT_EXIT:
2447 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val);
2448 break;
2449 case VM_CAP_IPI_EXIT:
2450 vlapic = vm_lapic(vcpu->vcpu);
2451 vlapic->ipi_exit = val;
2452 break;
2453 case VM_CAP_MASK_HWINTR:
2454 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR);
2455 vcpu->caps |= (val << VM_CAP_MASK_HWINTR);
2456 break;
2457 case VM_CAP_RFLAGS_TF: {
2458 uint64_t rflags;
2459
2460 /* Fetch RFLAGS. */
2461 if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) {
2462 error = (EINVAL);
2463 break;
2464 }
2465 if (val) {
2466 /* Save current TF bit. */
2467 vcpu->dbg.rflags_tf = rflags & PSL_T;
2468 /* Trace next instruction. */
2469 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS,
2470 (rflags | PSL_T))) {
2471 error = (EINVAL);
2472 break;
2473 }
2474 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF);
2475 } else {
2476 /*
2477 * Restore shadowed RFLAGS.TF only if vCPU was
2478 * previously stepped
2479 */
2480 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
2481 rflags &= ~PSL_T;
2482 rflags |= vcpu->dbg.rflags_tf;
2483 vcpu->dbg.rflags_tf = 0;
2484
2485 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS,
2486 rflags)) {
2487 error = (EINVAL);
2488 break;
2489 }
2490 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF);
2491 }
2492 }
2493
2494 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val);
2495 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF,
2496 val);
2497 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF,
2498 val);
2499 break;
2500 }
2501 default:
2502 error = ENOENT;
2503 break;
2504 }
2505 return (error);
2506 }
2507
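/*
 * Report the current state of an optional vcpu capability; intercept-backed
 * capabilities are read directly from the VMCB intercept bits.
 */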
2508 static int
2509 svm_getcap(void *vcpui, int type, int *retval)
2510 {
2511 struct svm_vcpu *vcpu;
2512 struct vlapic *vlapic;
2513 int error;
2514
2515 vcpu = vcpui;
2516 error = 0;
2517
2518 switch (type) {
2519 case VM_CAP_HALT_EXIT:
2520 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
2521 VMCB_INTCPT_HLT);
2522 break;
2523 case VM_CAP_PAUSE_EXIT:
2524 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
2525 VMCB_INTCPT_PAUSE);
2526 break;
2527 case VM_CAP_UNRESTRICTED_GUEST:
2528 *retval = 1; /* unrestricted guest is always enabled */
2529 break;
2530 case VM_CAP_BPT_EXIT:
2531 *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP));
2532 break;
2533 case VM_CAP_IPI_EXIT:
2534 vlapic = vm_lapic(vcpu->vcpu);
2535 *retval = vlapic->ipi_exit;
2536 break;
2537 case VM_CAP_RFLAGS_TF:
2538 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF));
2539 break;
2540 case VM_CAP_MASK_HWINTR:
2541 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR));
2542 break;
2543 default:
2544 error = ENOENT;
2545 break;
2546 }
2547 return (error);
2548 }
2549
2550 static struct vmspace *
2551 svm_vmspace_alloc(vm_offset_t min, vm_offset_t max)
2552 {
2553 return (svm_npt_alloc(min, max));
2554 }
2555
2556 static void
2557 svm_vmspace_free(struct vmspace *vmspace)
2558 {
2559 svm_npt_free(vmspace);
2560 }
2561
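/*
 * Allocate and initialize the virtual local APIC for a vcpu. The backing
 * APIC page is page-sized and page-aligned to match the guest's 4KB
 * memory-mapped APIC window.
 */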
2562 static struct vlapic *
2563 svm_vlapic_init(void *vcpui)
2564 {
2565 struct svm_vcpu *vcpu;
2566 struct vlapic *vlapic;
2567
2568 vcpu = vcpui;
2569 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
2570 vlapic->vm = vcpu->sc->vm;
2571 vlapic->vcpu = vcpu->vcpu;
2572 vlapic->vcpuid = vcpu->vcpuid;
2573 vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC,
2574 M_WAITOK | M_ZERO);
2575
2576 vlapic_init(vlapic);
2577
2578 return (vlapic);
2579 }
2580
2581 static void
2582 svm_vlapic_cleanup(struct vlapic *vlapic)
2583 {
2584
2585 vlapic_cleanup(vlapic);
2586 free(vlapic->apic_page, M_SVM_VLAPIC);
2587 free(vlapic, M_SVM_VLAPIC);
2588 }
2589
2590 #ifdef BHYVE_SNAPSHOT
2591 static int
2592 svm_vcpu_snapshot(void *vcpui, struct vm_snapshot_meta *meta)
2593 {
2594 struct svm_vcpu *vcpu;
2595 int err, running, hostcpu;
2596
2597 vcpu = vcpui;
2598 err = 0;
2599
2600 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
2601 if (running && hostcpu != curcpu) {
2602 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
2603 vcpu->vcpuid);
2604 return (EINVAL);
2605 }
2606
2607 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta);
2608 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta);
2609 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta);
2610 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta);
2611
2612 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta);
2613 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta);
2614
2615 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta);
2616
2617 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta);
2618 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta);
2619 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta);
2620
2621 /* Guest segments */
2622 /* ES */
2623 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta);
2624 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta);
2625
2626 /* CS */
2627 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta);
2628 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);
2629
2630 /* SS */
2631 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta);
2632 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta);
2633
2634 /* DS */
2635 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta);
2636 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta);
2637
2638 /* FS */
2639 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta);
2640 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta);
2641
2642 /* GS */
2643 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta);
2644 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta);
2645
2646 /* TR */
2647 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta);
2648 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta);
2649
2650 /* LDTR */
2651 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta);
2652 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta);
2653
2654 /* EFER */
2655 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta);
2656
2657 /* IDTR and GDTR */
2658 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta);
2659 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta);
2660
2661 /* Specific AMD registers */
2662 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta);
2663
2664 err += vmcb_snapshot_any(vcpu,
2665 VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta);
2666 err += vmcb_snapshot_any(vcpu,
2667 VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta);
2668 err += vmcb_snapshot_any(vcpu,
2669 VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta);
2670 err += vmcb_snapshot_any(vcpu,
2671 VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta);
2672 err += vmcb_snapshot_any(vcpu,
2673 VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta);
2674
2675 err += vmcb_snapshot_any(vcpu,
2676 VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta);
2677 err += vmcb_snapshot_any(vcpu,
2678 VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta);
2679
2680 err += vmcb_snapshot_any(vcpu,
2681 VMCB_ACCESS(VMCB_OFF_ASID, 4), meta);
2682
2683 err += vmcb_snapshot_any(vcpu,
2684 VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta);
2685
2686 err += vmcb_snapshot_any(vcpu,
2687 VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta);
2688
2689 err += vmcb_snapshot_any(vcpu,
2690 VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta);
2691 err += vmcb_snapshot_any(vcpu,
2692 VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta);
2693 err += vmcb_snapshot_any(vcpu,
2694 VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta);
2695 err += vmcb_snapshot_any(vcpu,
2696 VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta);
2697
2698 err += vmcb_snapshot_any(vcpu,
2699 VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta);
2700
2701 err += vmcb_snapshot_any(vcpu,
2702 VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta);
2703 err += vmcb_snapshot_any(vcpu,
2704 VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta);
2705 err += vmcb_snapshot_any(vcpu,
2706 VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta);
2707 err += vmcb_snapshot_any(vcpu,
2708 VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta);
2709
2710 err += vmcb_snapshot_any(vcpu,
2711 VMCB_ACCESS(VMCB_OFF_CPL, 1), meta);
2712
2713 err += vmcb_snapshot_any(vcpu,
2714 VMCB_ACCESS(VMCB_OFF_STAR, 8), meta);
2715 err += vmcb_snapshot_any(vcpu,
2716 VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta);
2717 err += vmcb_snapshot_any(vcpu,
2718 VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta);
2719
2720 err += vmcb_snapshot_any(vcpu,
2721 VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta);
2722
2723 err += vmcb_snapshot_any(vcpu,
2724 VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta);
2725
2726 err += vmcb_snapshot_any(vcpu,
2727 VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta);
2728 err += vmcb_snapshot_any(vcpu,
2729 VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta);
2730 err += vmcb_snapshot_any(vcpu,
2731 VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta);
2732
2733 err += vmcb_snapshot_any(vcpu,
2734 VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta);
2735
2736 err += vmcb_snapshot_any(vcpu,
2737 VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta);
2738 err += vmcb_snapshot_any(vcpu,
2739 VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta);
2740 err += vmcb_snapshot_any(vcpu,
2741 VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta);
2742 err += vmcb_snapshot_any(vcpu,
2743 VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta);
2744 err += vmcb_snapshot_any(vcpu,
2745 VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta);
2746 if (err != 0)
2747 goto done;
2748
2749 /* Snapshot swctx for virtual cpu */
2750 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done);
2751 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done);
2752 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done);
2753 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done);
2754 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done);
2755 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done);
2756 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done);
2757 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done);
2758 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done);
2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done);
2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done);
2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done);
2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done);
2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done);
2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done);
2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done);
2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done);
2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done);
2768
2769 /* Snapshot remaining svm_vcpu struct fields */
2770 
2771 /* Snapshot the nextrip field */
2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done);
2773 
2774 /* Snapshot the lastcpu field */
2775 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done);
2776 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done);
2777 
2778 /* Snapshot the eptgen (nested page table generation) field */
2779 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done);
2780
2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done);
2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done);
2783
2784 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done);
2785
2786 /* Set all caches dirty */
2787 if (meta->op == VM_SNAPSHOT_RESTORE)
2788 svm_set_dirty(vcpu, 0xffffffff);
2789
2790 done:
2791 return (err);
2792 }
2793
2794 static int
2795 svm_restore_tsc(void *vcpui, uint64_t offset)
2796 {
2797 struct svm_vcpu *vcpu = vcpui;
2798
2799 svm_set_tsc_offset(vcpu, offset);
2800
2801 return (0);
2802 }
2803 #endif
2804
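/*
 * Method table through which the machine-independent vmm(4) code drives
 * the SVM backend.
 */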
2805 const struct vmm_ops vmm_ops_amd = {
2806 .modinit = svm_modinit,
2807 .modcleanup = svm_modcleanup,
2808 .modresume = svm_modresume,
2809 .init = svm_init,
2810 .run = svm_run,
2811 .cleanup = svm_cleanup,
2812 .vcpu_init = svm_vcpu_init,
2813 .vcpu_cleanup = svm_vcpu_cleanup,
2814 .getreg = svm_getreg,
2815 .setreg = svm_setreg,
2816 .getdesc = svm_getdesc,
2817 .setdesc = svm_setdesc,
2818 .getcap = svm_getcap,
2819 .setcap = svm_setcap,
2820 .vmspace_alloc = svm_vmspace_alloc,
2821 .vmspace_free = svm_vmspace_free,
2822 .vlapic_init = svm_vlapic_init,
2823 .vlapic_cleanup = svm_vlapic_cleanup,
2824 #ifdef BHYVE_SNAPSHOT
2825 .vcpu_snapshot = svm_vcpu_snapshot,
2826 .restore_tsc = svm_restore_tsc,
2827 #endif
2828 };
2829