1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2011 NetApp, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 #include "opt_bhyve_snapshot.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/sysctl.h>
37 #include <sys/malloc.h>
38 #include <sys/pcpu.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/proc.h>
42 #include <sys/rwlock.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/sx.h>
46 #include <sys/vnode.h>
47
48 #include <vm/vm.h>
49 #include <vm/vm_param.h>
50 #include <vm/vm_extern.h>
51 #include <vm/vm_object.h>
52 #include <vm/vm_page.h>
53 #include <vm/pmap.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_pager.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vnode_pager.h>
58 #include <vm/swap_pager.h>
59 #include <vm/uma.h>
60
61 #include <machine/cpu.h>
62 #include <machine/pcb.h>
63 #include <machine/smp.h>
64 #include <machine/md_var.h>
65 #include <x86/psl.h>
66 #include <x86/apicreg.h>
67 #include <x86/ifunc.h>
68
69 #include <machine/vmm.h>
70 #include <machine/vmm_dev.h>
71 #include <machine/vmm_instruction_emul.h>
72 #include <machine/vmm_snapshot.h>
73
74 #include "vmm_ioport.h"
75 #include "vmm_ktr.h"
76 #include "vmm_host.h"
77 #include "vmm_mem.h"
78 #include "vmm_util.h"
79 #include "vatpic.h"
80 #include "vatpit.h"
81 #include "vhpet.h"
82 #include "vioapic.h"
83 #include "vlapic.h"
84 #include "vpmtmr.h"
85 #include "vrtc.h"
86 #include "vmm_stat.h"
87 #include "vmm_lapic.h"
88
89 #include "io/ppt.h"
90 #include "io/iommu.h"
91
92 struct vlapic;
93
94 /*
95 * Initialization:
96 * (a) allocated when vcpu is created
97 * (i) initialized when vcpu is created and when it is reinitialized
98 * (o) initialized the first time the vcpu is created
99 * (x) initialized before use
100 */
101 struct vcpu {
102 struct mtx mtx; /* (o) protects 'state' and 'hostcpu' */
103 enum vcpu_state state; /* (o) vcpu state */
104 int vcpuid; /* (o) */
105 int hostcpu; /* (o) vcpu's host cpu */
106 int reqidle; /* (i) request vcpu to idle */
107 struct vm *vm; /* (o) */
108 void *cookie; /* (i) cpu-specific data */
109 struct vlapic *vlapic; /* (i) APIC device model */
110 enum x2apic_state x2apic_state; /* (i) APIC mode */
111 uint64_t exitintinfo; /* (i) events pending at VM exit */
112 int nmi_pending; /* (i) NMI pending */
113 int extint_pending; /* (i) INTR pending */
114 int exception_pending; /* (i) exception pending */
115 int exc_vector; /* (x) exception collateral */
116 int exc_errcode_valid;
117 uint32_t exc_errcode;
118 struct savefpu *guestfpu; /* (a,i) guest fpu state */
119 uint64_t guest_xcr0; /* (i) guest %xcr0 register */
120 void *stats; /* (a,i) statistics */
121 struct vm_exit exitinfo; /* (x) exit reason and collateral */
122 cpuset_t exitinfo_cpuset; /* (x) storage for vmexit handlers */
123 uint64_t nextrip; /* (x) next instruction to execute */
124 uint64_t tsc_offset; /* (o) TSC offsetting */
125 };
126
127 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
128 #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx))
129 #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx))
130 #define vcpu_unlock(v) mtx_unlock_spin(&((v)->mtx))
131 #define vcpu_assert_locked(v) mtx_assert(&((v)->mtx), MA_OWNED)
132
133 struct mem_seg {
134 size_t len;
135 bool sysmem;
136 struct vm_object *object;
137 };
138 #define VM_MAX_MEMSEGS 4
139
140 struct mem_map {
141 vm_paddr_t gpa;
142 size_t len;
143 vm_ooffset_t segoff;
144 int segid;
145 int prot;
146 int flags;
147 };
148 #define VM_MAX_MEMMAPS 8
149
150 /*
151 * Initialization:
152 * (o) initialized the first time the VM is created
153 * (i) initialized when VM is created and when it is reinitialized
154 * (x) initialized before use
155 *
156 * Locking:
157 * [m] mem_segs_lock
158 * [r] rendezvous_mtx
159 * [v] reads require one frozen vcpu, writes require freezing all vcpus
160 */
161 struct vm {
162 void *cookie; /* (i) cpu-specific data */
163 void *iommu; /* (x) iommu-specific data */
164 struct vhpet *vhpet; /* (i) virtual HPET */
165 struct vioapic *vioapic; /* (i) virtual ioapic */
166 struct vatpic *vatpic; /* (i) virtual atpic */
167 struct vatpit *vatpit; /* (i) virtual atpit */
168 struct vpmtmr *vpmtmr; /* (i) virtual ACPI PM timer */
169 struct vrtc *vrtc; /* (o) virtual RTC */
170 volatile cpuset_t active_cpus; /* (i) active vcpus */
171 volatile cpuset_t debug_cpus; /* (i) vcpus stopped for debug */
172 cpuset_t startup_cpus; /* (i) [r] waiting for startup */
173 int suspend; /* (i) stop VM execution */
174 bool dying; /* (o) is dying */
175 volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
176 volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */
177 cpuset_t rendezvous_req_cpus; /* (x) [r] rendezvous requested */
178 cpuset_t rendezvous_done_cpus; /* (x) [r] rendezvous finished */
179 void *rendezvous_arg; /* (x) [r] rendezvous func/arg */
180 vm_rendezvous_func_t rendezvous_func;
181 struct mtx rendezvous_mtx; /* (o) rendezvous lock */
182 struct mem_map mem_maps[VM_MAX_MEMMAPS]; /* (i) [m+v] guest address space */
183 struct mem_seg mem_segs[VM_MAX_MEMSEGS]; /* (o) [m+v] guest memory regions */
184 struct vmspace *vmspace; /* (o) guest's address space */
185 char name[VM_MAX_NAMELEN+1]; /* (o) virtual machine name */
186 struct vcpu **vcpu; /* (o) guest vcpus */
187 /* The following describe the vm cpu topology */
188 uint16_t sockets; /* (o) num of sockets */
189 uint16_t cores; /* (o) num of cores/socket */
190 uint16_t threads; /* (o) num of threads/core */
191 uint16_t maxcpus; /* (o) max pluggable cpus */
192 struct sx mem_segs_lock; /* (o) */
193 struct sx vcpus_init_lock; /* (o) */
194 };
195
196 #define VMM_CTR0(vcpu, format) \
197 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
198
199 #define VMM_CTR1(vcpu, format, p1) \
200 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
201
202 #define VMM_CTR2(vcpu, format, p1, p2) \
203 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
204
205 #define VMM_CTR3(vcpu, format, p1, p2, p3) \
206 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
207
208 #define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \
209 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
210
211 static int vmm_initialized;
212
213 static void vmmops_panic(void);
214
215 static void
216 vmmops_panic(void)
217 {
218 panic("vmm_ops func called when !vmm_is_intel() && !vmm_is_svm()");
219 }
220
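/*
 * Each vmm_ops method is reached through an ifunc that is resolved once at
 * load time to the Intel (VT-x) or AMD (SVM) implementation.  As an
 * illustrative sketch (not the literal preprocessor output), the 'getreg'
 * instance defined below resolves roughly to:
 *
 *	static int (*resolver(void))(void *, int, uint64_t *)
 *	{
 *		if (vmm_is_intel())
 *			return (vmm_ops_intel.getreg);
 *		else if (vmm_is_svm())
 *			return (vmm_ops_amd.getreg);
 *		return ((int (*)(void *, int, uint64_t *))vmmops_panic);
 *	}
 *
 * so callers simply invoke vmmops_getreg() with no per-call dispatch.
 */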
221 #define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \
222 DEFINE_IFUNC(static, ret_type, vmmops_##opname, args) \
223 { \
224 if (vmm_is_intel()) \
225 return (vmm_ops_intel.opname); \
226 else if (vmm_is_svm()) \
227 return (vmm_ops_amd.opname); \
228 else \
229 return ((ret_type (*)args)vmmops_panic); \
230 }
231
232 DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
233 DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
234 DEFINE_VMMOPS_IFUNC(void, modresume, (void))
235 DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
236 DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t rip, struct pmap *pmap,
237 struct vm_eventinfo *info))
238 DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
239 DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
240 int vcpu_id))
241 DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
242 DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
243 DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
244 DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vcpui, int num, struct seg_desc *desc))
245 DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vcpui, int num, struct seg_desc *desc))
246 DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
247 DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
248 DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
249 vm_offset_t max))
250 DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
251 DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vcpui))
252 DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (struct vlapic *vlapic))
253 #ifdef BHYVE_SNAPSHOT
254 DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
255 struct vm_snapshot_meta *meta))
256 DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
257 #endif
258
259 SDT_PROVIDER_DEFINE(vmm);
260
261 static MALLOC_DEFINE(M_VM, "vm", "vm");
262
263 /* statistics */
264 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
265
266 SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
267 NULL);
268
269 /*
270 * Halt the guest if all vcpus are executing a HLT instruction with
271 * interrupts disabled.
272 */
273 static int halt_detection_enabled = 1;
274 SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
275 &halt_detection_enabled, 0,
276 "Halt VM if all vcpus execute HLT with interrupts disabled");
277
278 static int vmm_ipinum;
279 SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
280 "IPI vector used for vcpu notifications");
281
282 static int trace_guest_exceptions;
283 SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
284 &trace_guest_exceptions, 0,
285 "Trap into hypervisor on all guest exceptions and reflect them back");
286
287 static int trap_wbinvd;
288 SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
289 "WBINVD triggers a VM-exit");
290
291 u_int vm_maxcpu;
292 SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
293 &vm_maxcpu, 0, "Maximum number of vCPUs");
294
295 static void vm_free_memmap(struct vm *vm, int ident);
296 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
297 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
298
299 /*
300 * Upper limit on vm_maxcpu. Limited by use of uint16_t types for CPU
301 * counts as well as range of vpid values for VT-x and by the capacity
302 * of cpuset_t masks. The call to new_unrhdr() in vpid_init() in
303 * vmx.c requires 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below.
304 */
305 #define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)
306
307 #ifdef KTR
308 static const char *
309 vcpu_state2str(enum vcpu_state state)
310 {
311
312 switch (state) {
313 case VCPU_IDLE:
314 return ("idle");
315 case VCPU_FROZEN:
316 return ("frozen");
317 case VCPU_RUNNING:
318 return ("running");
319 case VCPU_SLEEPING:
320 return ("sleeping");
321 default:
322 return ("unknown");
323 }
324 }
325 #endif
326
327 static void
328 vcpu_cleanup(struct vcpu *vcpu, bool destroy)
329 {
330 vmmops_vlapic_cleanup(vcpu->vlapic);
331 vmmops_vcpu_cleanup(vcpu->cookie);
332 vcpu->cookie = NULL;
333 if (destroy) {
334 vmm_stat_free(vcpu->stats);
335 fpu_save_area_free(vcpu->guestfpu);
336 vcpu_lock_destroy(vcpu);
337 free(vcpu, M_VM);
338 }
339 }
340
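/*
 * Allocate a vcpu and perform its one-time ("(a)" and "(o)") initialization.
 * The per-(re)init state tagged "(i)" above is set up separately in
 * vcpu_init(), which also runs when the VM is reinitialized.
 */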
341 static struct vcpu *
342 vcpu_alloc(struct vm *vm, int vcpu_id)
343 {
344 struct vcpu *vcpu;
345
346 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
347 ("vcpu_init: invalid vcpu %d", vcpu_id));
348
349 vcpu = malloc(sizeof(*vcpu), M_VM, M_WAITOK | M_ZERO);
350 vcpu_lock_init(vcpu);
351 vcpu->state = VCPU_IDLE;
352 vcpu->hostcpu = NOCPU;
353 vcpu->vcpuid = vcpu_id;
354 vcpu->vm = vm;
355 vcpu->guestfpu = fpu_save_area_alloc();
356 vcpu->stats = vmm_stat_alloc();
357 vcpu->tsc_offset = 0;
358 return (vcpu);
359 }
360
361 static void
362 vcpu_init(struct vcpu *vcpu)
363 {
364 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
365 vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
366 vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
367 vcpu->reqidle = 0;
368 vcpu->exitintinfo = 0;
369 vcpu->nmi_pending = 0;
370 vcpu->extint_pending = 0;
371 vcpu->exception_pending = 0;
372 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
373 fpu_save_area_reset(vcpu->guestfpu);
374 vmm_stat_init(vcpu->stats);
375 }
376
377 int
378 vcpu_trace_exceptions(struct vcpu *vcpu)
379 {
380
381 return (trace_guest_exceptions);
382 }
383
384 int
385 vcpu_trap_wbinvd(struct vcpu *vcpu)
386 {
387 return (trap_wbinvd);
388 }
389
390 struct vm_exit *
391 vm_exitinfo(struct vcpu *vcpu)
392 {
393 return (&vcpu->exitinfo);
394 }
395
396 cpuset_t *
397 vm_exitinfo_cpuset(struct vcpu *vcpu)
398 {
399 return (&vcpu->exitinfo_cpuset);
400 }
401
402 static int
403 vmm_init(void)
404 {
405 int error;
406
407 if (!vmm_is_hw_supported())
408 return (ENXIO);
409
410 vm_maxcpu = mp_ncpus;
411 TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu);
412
413 if (vm_maxcpu > VM_MAXCPU) {
414 printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU);
415 vm_maxcpu = VM_MAXCPU;
416 }
417 if (vm_maxcpu == 0)
418 vm_maxcpu = 1;
419
420 vmm_host_state_init();
421
422 vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
423 &IDTVEC(justreturn));
424 if (vmm_ipinum < 0)
425 vmm_ipinum = IPI_AST;
426
427 error = vmm_mem_init();
428 if (error)
429 return (error);
430
431 vmm_resume_p = vmmops_modresume;
432
433 return (vmmops_modinit(vmm_ipinum));
434 }
435
436 static int
437 vmm_handler(module_t mod, int what, void *arg)
438 {
439 int error;
440
441 switch (what) {
442 case MOD_LOAD:
443 if (vmm_is_hw_supported()) {
444 vmmdev_init();
445 error = vmm_init();
446 if (error == 0)
447 vmm_initialized = 1;
448 } else {
449 error = ENXIO;
450 }
451 break;
452 case MOD_UNLOAD:
453 if (vmm_is_hw_supported()) {
454 error = vmmdev_cleanup();
455 if (error == 0) {
456 vmm_resume_p = NULL;
457 iommu_cleanup();
458 if (vmm_ipinum != IPI_AST)
459 lapic_ipi_free(vmm_ipinum);
460 error = vmmops_modcleanup();
461 /*
462 * Something bad happened - prevent new
463 * VMs from being created
464 */
465 if (error)
466 vmm_initialized = 0;
467 }
468 } else {
469 error = 0;
470 }
471 break;
472 default:
473 error = 0;
474 break;
475 }
476 return (error);
477 }
478
479 static moduledata_t vmm_kmod = {
480 "vmm",
481 vmm_handler,
482 NULL
483 };
484
485 /*
486 * vmm initialization has the following dependencies:
487 *
488 * - VT-x initialization requires smp_rendezvous() and therefore must happen
489 * after SMP is fully functional (after SI_SUB_SMP).
490 */
491 DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
492 MODULE_VERSION(vmm, 1);
493
494 static void
495 vm_init(struct vm *vm, bool create)
496 {
497 vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
498 vm->iommu = NULL;
499 vm->vioapic = vioapic_init(vm);
500 vm->vhpet = vhpet_init(vm);
501 vm->vatpic = vatpic_init(vm);
502 vm->vatpit = vatpit_init(vm);
503 vm->vpmtmr = vpmtmr_init(vm);
504 if (create)
505 vm->vrtc = vrtc_init(vm);
506
507 CPU_ZERO(&vm->active_cpus);
508 CPU_ZERO(&vm->debug_cpus);
509 CPU_ZERO(&vm->startup_cpus);
510
511 vm->suspend = 0;
512 CPU_ZERO(&vm->suspended_cpus);
513
514 if (!create) {
515 for (int i = 0; i < vm->maxcpus; i++) {
516 if (vm->vcpu[i] != NULL)
517 vcpu_init(vm->vcpu[i]);
518 }
519 }
520 }
521
522 void
523 vm_disable_vcpu_creation(struct vm *vm)
524 {
525 sx_xlock(&vm->vcpus_init_lock);
526 vm->dying = true;
527 sx_xunlock(&vm->vcpus_init_lock);
528 }
529
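/*
 * Look up the vcpu for 'vcpuid', creating it on first use.  The lockless
 * fast path below pairs with the release store in the slow path: a vcpu
 * pointer only becomes visible after the vcpu is fully constructed.
 */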
530 struct vcpu *
531 vm_alloc_vcpu(struct vm *vm, int vcpuid)
532 {
533 struct vcpu *vcpu;
534
535 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
536 return (NULL);
537
538 vcpu = atomic_load_ptr(&vm->vcpu[vcpuid]);
539 if (__predict_true(vcpu != NULL))
540 return (vcpu);
541
542 sx_xlock(&vm->vcpus_init_lock);
543 vcpu = vm->vcpu[vcpuid];
544 if (vcpu == NULL && !vm->dying) {
545 vcpu = vcpu_alloc(vm, vcpuid);
546 vcpu_init(vcpu);
547
548 /*
549 * Ensure vCPU is fully created before updating pointer
550 * to permit unlocked reads above.
551 */
552 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
553 (uintptr_t)vcpu);
554 }
555 sx_xunlock(&vm->vcpus_init_lock);
556 return (vcpu);
557 }
558
559 void
560 vm_slock_vcpus(struct vm *vm)
561 {
562 sx_slock(&vm->vcpus_init_lock);
563 }
564
565 void
566 vm_unlock_vcpus(struct vm *vm)
567 {
568 sx_unlock(&vm->vcpus_init_lock);
569 }
570
571 /*
572 * The default CPU topology is a single thread per package.
573 */
574 u_int cores_per_package = 1;
575 u_int threads_per_core = 1;
576
577 int
578 vm_create(const char *name, struct vm **retvm)
579 {
580 struct vm *vm;
581 struct vmspace *vmspace;
582
583 /*
584 * If vmm.ko could not be successfully initialized then don't attempt
585 * to create the virtual machine.
586 */
587 if (!vmm_initialized)
588 return (ENXIO);
589
590 if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) ==
591 VM_MAX_NAMELEN + 1)
592 return (EINVAL);
593
594 vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48);
595 if (vmspace == NULL)
596 return (ENOMEM);
597
598 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
599 strcpy(vm->name, name);
600 vm->vmspace = vmspace;
601 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
602 sx_init(&vm->mem_segs_lock, "vm mem_segs");
603 sx_init(&vm->vcpus_init_lock, "vm vcpus");
604 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK |
605 M_ZERO);
606
607 vm->sockets = 1;
608 vm->cores = cores_per_package; /* XXX backwards compatibility */
609 vm->threads = threads_per_core; /* XXX backwards compatibility */
610 vm->maxcpus = vm_maxcpu;
611
612 vm_init(vm, true);
613
614 *retvm = vm;
615 return (0);
616 }
617
618 void
619 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
620 uint16_t *threads, uint16_t *maxcpus)
621 {
622 *sockets = vm->sockets;
623 *cores = vm->cores;
624 *threads = vm->threads;
625 *maxcpus = vm->maxcpus;
626 }
627
628 uint16_t
629 vm_get_maxcpus(struct vm *vm)
630 {
631 return (vm->maxcpus);
632 }
633
634 int
635 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
636 uint16_t threads, uint16_t maxcpus __unused)
637 {
638 /* Ignore maxcpus. */
639 if ((sockets * cores * threads) > vm->maxcpus)
640 return (EINVAL);
641 vm->sockets = sockets;
642 vm->cores = cores;
643 vm->threads = threads;
644 return (0);
645 }
646
647 static void
648 vm_cleanup(struct vm *vm, bool destroy)
649 {
650 struct mem_map *mm;
651 int i;
652
653 if (destroy)
654 vm_xlock_memsegs(vm);
655
656 ppt_unassign_all(vm);
657
658 if (vm->iommu != NULL)
659 iommu_destroy_domain(vm->iommu);
660
661 if (destroy)
662 vrtc_cleanup(vm->vrtc);
663 else
664 vrtc_reset(vm->vrtc);
665 vpmtmr_cleanup(vm->vpmtmr);
666 vatpit_cleanup(vm->vatpit);
667 vhpet_cleanup(vm->vhpet);
668 vatpic_cleanup(vm->vatpic);
669 vioapic_cleanup(vm->vioapic);
670
671 for (i = 0; i < vm->maxcpus; i++) {
672 if (vm->vcpu[i] != NULL)
673 vcpu_cleanup(vm->vcpu[i], destroy);
674 }
675
676 vmmops_cleanup(vm->cookie);
677
678 /*
679 * System memory is removed from the guest address space only when
680 * the VM is destroyed. This is because the mapping remains the same
681 * across VM reset.
682 *
683 * Device memory can be relocated by the guest (e.g. using PCI BARs)
684 * so those mappings are removed on a VM reset.
685 */
686 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
687 mm = &vm->mem_maps[i];
688 if (destroy || !sysmem_mapping(vm, mm))
689 vm_free_memmap(vm, i);
690 }
691
692 if (destroy) {
693 for (i = 0; i < VM_MAX_MEMSEGS; i++)
694 vm_free_memseg(vm, i);
695 vm_unlock_memsegs(vm);
696
697 vmmops_vmspace_free(vm->vmspace);
698 vm->vmspace = NULL;
699
700 free(vm->vcpu, M_VM);
701 sx_destroy(&vm->vcpus_init_lock);
702 sx_destroy(&vm->mem_segs_lock);
703 mtx_destroy(&vm->rendezvous_mtx);
704 }
705 }
706
707 void
708 vm_destroy(struct vm *vm)
709 {
710 vm_cleanup(vm, true);
711 free(vm, M_VM);
712 }
713
714 int
715 vm_reinit(struct vm *vm)
716 {
717 int error;
718
719 /*
720 * A virtual machine can be reset only if all vcpus are suspended.
721 */
722 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
723 vm_cleanup(vm, false);
724 vm_init(vm, false);
725 error = 0;
726 } else {
727 error = EBUSY;
728 }
729
730 return (error);
731 }
732
733 const char *
734 vm_name(struct vm *vm)
735 {
736 return (vm->name);
737 }
738
739 void
740 vm_slock_memsegs(struct vm *vm)
741 {
742 sx_slock(&vm->mem_segs_lock);
743 }
744
745 void
746 vm_xlock_memsegs(struct vm *vm)
747 {
748 sx_xlock(&vm->mem_segs_lock);
749 }
750
751 void
752 vm_unlock_memsegs(struct vm *vm)
753 {
754 sx_unlock(&vm->mem_segs_lock);
755 }
756
757 int
758 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
759 {
760 vm_object_t obj;
761
762 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
763 return (ENOMEM);
764 else
765 return (0);
766 }
767
768 int
769 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
770 {
771
772 vmm_mmio_free(vm->vmspace, gpa, len);
773 return (0);
774 }
775
776 /*
777 * Return 'true' if 'gpa' is allocated in the guest address space.
778 *
779 * This function is called in the context of a running vcpu which acts as
780 * an implicit lock on 'vm->mem_maps[]'.
781 */
782 bool
783 vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
784 {
785 struct vm *vm = vcpu->vm;
786 struct mem_map *mm;
787 int i;
788
789 #ifdef INVARIANTS
790 int hostcpu, state;
791 state = vcpu_get_state(vcpu, &hostcpu);
792 KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
793 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
794 #endif
795
796 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
797 mm = &vm->mem_maps[i];
798 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
799 return (true); /* 'gpa' is sysmem or devmem */
800 }
801
802 if (ppt_is_mmio(vm, gpa))
803 return (true); /* 'gpa' is pci passthru mmio */
804
805 return (false);
806 }
807
808 int
809 vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
810 {
811 struct mem_seg *seg;
812 vm_object_t obj;
813
814 sx_assert(&vm->mem_segs_lock, SX_XLOCKED);
815
816 if (ident < 0 || ident >= VM_MAX_MEMSEGS)
817 return (EINVAL);
818
819 if (len == 0 || (len & PAGE_MASK))
820 return (EINVAL);
821
822 seg = &vm->mem_segs[ident];
823 if (seg->object != NULL) {
824 if (seg->len == len && seg->sysmem == sysmem)
825 return (EEXIST);
826 else
827 return (EINVAL);
828 }
829
830 obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT);
831 if (obj == NULL)
832 return (ENOMEM);
833
834 seg->len = len;
835 seg->object = obj;
836 seg->sysmem = sysmem;
837 return (0);
838 }
839
840 int
841 vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
842 vm_object_t *objptr)
843 {
844 struct mem_seg *seg;
845
846 sx_assert(&vm->mem_segs_lock, SX_LOCKED);
847
848 if (ident < 0 || ident >= VM_MAX_MEMSEGS)
849 return (EINVAL);
850
851 seg = &vm->mem_segs[ident];
852 if (len)
853 *len = seg->len;
854 if (sysmem)
855 *sysmem = seg->sysmem;
856 if (objptr)
857 *objptr = seg->object;
858 return (0);
859 }
860
861 void
862 vm_free_memseg(struct vm *vm, int ident)
863 {
864 struct mem_seg *seg;
865
866 KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
867 ("%s: invalid memseg ident %d", __func__, ident));
868
869 seg = &vm->mem_segs[ident];
870 if (seg->object != NULL) {
871 vm_object_deallocate(seg->object);
872 bzero(seg, sizeof(struct mem_seg));
873 }
874 }
875
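/*
 * Map 'len' bytes starting at offset 'first' within memory segment 'segid'
 * into the guest address space at 'gpa', using a free slot in 'mem_maps[]'.
 */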
876 int
877 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
878 size_t len, int prot, int flags)
879 {
880 struct mem_seg *seg;
881 struct mem_map *m, *map;
882 vm_ooffset_t last;
883 int i, error;
884
885 if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
886 return (EINVAL);
887
888 if (flags & ~VM_MEMMAP_F_WIRED)
889 return (EINVAL);
890
891 if (segid < 0 || segid >= VM_MAX_MEMSEGS)
892 return (EINVAL);
893
894 seg = &vm->mem_segs[segid];
895 if (seg->object == NULL)
896 return (EINVAL);
897
898 last = first + len;
899 if (first < 0 || first >= last || last > seg->len)
900 return (EINVAL);
901
902 if ((gpa | first | last) & PAGE_MASK)
903 return (EINVAL);
904
905 map = NULL;
906 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
907 m = &vm->mem_maps[i];
908 if (m->len == 0) {
909 map = m;
910 break;
911 }
912 }
913
914 if (map == NULL)
915 return (ENOSPC);
916
917 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
918 len, 0, VMFS_NO_SPACE, prot, prot, 0);
919 if (error != KERN_SUCCESS)
920 return (EFAULT);
921
922 vm_object_reference(seg->object);
923
924 if (flags & VM_MEMMAP_F_WIRED) {
925 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
926 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
927 if (error != KERN_SUCCESS) {
928 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
929 return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
930 EFAULT);
931 }
932 }
933
934 map->gpa = gpa;
935 map->len = len;
936 map->segoff = first;
937 map->segid = segid;
938 map->prot = prot;
939 map->flags = flags;
940 return (0);
941 }
942
943 int
944 vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
945 {
946 struct mem_map *m;
947 int i;
948
949 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
950 m = &vm->mem_maps[i];
951 if (m->gpa == gpa && m->len == len &&
952 (m->flags & VM_MEMMAP_F_IOMMU) == 0) {
953 vm_free_memmap(vm, i);
954 return (0);
955 }
956 }
957
958 return (EINVAL);
959 }
960
961 int
962 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
963 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
964 {
965 struct mem_map *mm, *mmnext;
966 int i;
967
968 mmnext = NULL;
969 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
970 mm = &vm->mem_maps[i];
971 if (mm->len == 0 || mm->gpa < *gpa)
972 continue;
973 if (mmnext == NULL || mm->gpa < mmnext->gpa)
974 mmnext = mm;
975 }
976
977 if (mmnext != NULL) {
978 *gpa = mmnext->gpa;
979 if (segid)
980 *segid = mmnext->segid;
981 if (segoff)
982 *segoff = mmnext->segoff;
983 if (len)
984 *len = mmnext->len;
985 if (prot)
986 *prot = mmnext->prot;
987 if (flags)
988 *flags = mmnext->flags;
989 return (0);
990 } else {
991 return (ENOENT);
992 }
993 }
994
995 static void
996 vm_free_memmap(struct vm *vm, int ident)
997 {
998 struct mem_map *mm;
999 int error __diagused;
1000
1001 mm = &vm->mem_maps[ident];
1002 if (mm->len) {
1003 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
1004 mm->gpa + mm->len);
1005 KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
1006 __func__, error));
1007 bzero(mm, sizeof(struct mem_map));
1008 }
1009 }
1010
1011 static __inline bool
1012 sysmem_mapping(struct vm *vm, struct mem_map *mm)
1013 {
1014
1015 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
1016 return (true);
1017 else
1018 return (false);
1019 }
1020
1021 vm_paddr_t
1022 vmm_sysmem_maxaddr(struct vm *vm)
1023 {
1024 struct mem_map *mm;
1025 vm_paddr_t maxaddr;
1026 int i;
1027
1028 maxaddr = 0;
1029 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
1030 mm = &vm->mem_maps[i];
1031 if (sysmem_mapping(vm, mm)) {
1032 if (maxaddr < mm->gpa + mm->len)
1033 maxaddr = mm->gpa + mm->len;
1034 }
1035 }
1036 return (maxaddr);
1037 }
1038
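/*
 * Populate the IOMMU domain with gpa->hpa translations for every wired
 * system-memory mapping so that passthru devices can DMA directly into
 * guest memory.  vm_iommu_unmap() below undoes this.
 */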
1039 static void
1040 vm_iommu_map(struct vm *vm)
1041 {
1042 vm_paddr_t gpa, hpa;
1043 struct mem_map *mm;
1044 int i;
1045
1046 sx_assert(&vm->mem_segs_lock, SX_LOCKED);
1047
1048 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
1049 mm = &vm->mem_maps[i];
1050 if (!sysmem_mapping(vm, mm))
1051 continue;
1052
1053 KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
1054 ("iommu map found invalid memmap %#lx/%#lx/%#x",
1055 mm->gpa, mm->len, mm->flags));
1056 if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
1057 continue;
1058 mm->flags |= VM_MEMMAP_F_IOMMU;
1059
1060 for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
1061 hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
1062
1063 /*
1064 * All mappings in the vmm vmspace must be
1065 * present since they are managed by vmm in this way.
1066 * Because we are in pass-through mode, the
1067 * mappings must also be wired. This implies
1068 * that all pages must be mapped and wired,
1069 * allowing the use of pmap_extract() and avoiding
1070 * the need for vm_gpa_hold_global().
1071 *
1072 * This could change if/when we start
1073 * supporting page faults on IOMMU maps.
1074 */
1075 KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(hpa)),
1076 ("vm_iommu_map: vm %p gpa %jx hpa %jx not wired",
1077 vm, (uintmax_t)gpa, (uintmax_t)hpa));
1078
1079 iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE);
1080 }
1081 }
1082
1083 iommu_invalidate_tlb(iommu_host_domain());
1084 }
1085
1086 static void
1087 vm_iommu_unmap(struct vm *vm)
1088 {
1089 vm_paddr_t gpa;
1090 struct mem_map *mm;
1091 int i;
1092
1093 sx_assert(&vm->mem_segs_lock, SX_LOCKED);
1094
1095 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
1096 mm = &vm->mem_maps[i];
1097 if (!sysmem_mapping(vm, mm))
1098 continue;
1099
1100 if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
1101 continue;
1102 mm->flags &= ~VM_MEMMAP_F_IOMMU;
1103 KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
1104 ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
1105 mm->gpa, mm->len, mm->flags));
1106
1107 for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
1108 KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract(
1109 vmspace_pmap(vm->vmspace), gpa))),
1110 ("vm_iommu_unmap: vm %p gpa %jx not wired",
1111 vm, (uintmax_t)gpa));
1112 iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
1113 }
1114 }
1115
1116 /*
1117 * Invalidate the cached translations associated with the domain
1118 * from which pages were removed.
1119 */
1120 iommu_invalidate_tlb(vm->iommu);
1121 }
1122
1123 int
1124 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
1125 {
1126 int error;
1127
1128 error = ppt_unassign_device(vm, bus, slot, func);
1129 if (error)
1130 return (error);
1131
1132 if (ppt_assigned_devices(vm) == 0)
1133 vm_iommu_unmap(vm);
1134
1135 return (0);
1136 }
1137
1138 int
1139 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
1140 {
1141 int error;
1142 vm_paddr_t maxaddr;
1143
1144 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
1145 if (ppt_assigned_devices(vm) == 0) {
1146 KASSERT(vm->iommu == NULL,
1147 ("vm_assign_pptdev: iommu must be NULL"));
1148 maxaddr = vmm_sysmem_maxaddr(vm);
1149 vm->iommu = iommu_create_domain(maxaddr);
1150 if (vm->iommu == NULL)
1151 return (ENXIO);
1152 vm_iommu_map(vm);
1153 }
1154
1155 error = ppt_assign_device(vm, bus, slot, func);
1156 return (error);
1157 }
1158
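/*
 * Wire the single guest page backing 'gpa' and return a host virtual
 * address (via the direct map) for the requested range, which must not
 * cross a page boundary.  The caller releases the page by passing the
 * returned cookie to vm_gpa_release().
 */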
1159 static void *
1160 _vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
1161 void **cookie)
1162 {
1163 int i, count, pageoff;
1164 struct mem_map *mm;
1165 vm_page_t m;
1166
1167 pageoff = gpa & PAGE_MASK;
1168 if (len > PAGE_SIZE - pageoff)
1169 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
1170
1171 count = 0;
1172 for (i = 0; i < VM_MAX_MEMMAPS; i++) {
1173 mm = &vm->mem_maps[i];
1174 if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
1175 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
1176 trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
1177 break;
1178 }
1179 }
1180
1181 if (count == 1) {
1182 *cookie = m;
1183 return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
1184 } else {
1185 *cookie = NULL;
1186 return (NULL);
1187 }
1188 }
1189
1190 void *
1191 vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
1192 void **cookie)
1193 {
1194 #ifdef INVARIANTS
1195 /*
1196 * The current vcpu should be frozen to ensure 'vm_memmap[]'
1197 * stability.
1198 */
1199 int state = vcpu_get_state(vcpu, NULL);
1200 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
1201 __func__, state));
1202 #endif
1203 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
1204 }
1205
1206 void *
1207 vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
1208 void **cookie)
1209 {
1210 sx_assert(&vm->mem_segs_lock, SX_LOCKED);
1211 return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
1212 }
1213
1214 void
1215 vm_gpa_release(void *cookie)
1216 {
1217 vm_page_t m = cookie;
1218
1219 vm_page_unwire(m, PQ_ACTIVE);
1220 }
1221
1222 int
1223 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
1224 {
1225
1226 if (reg >= VM_REG_LAST)
1227 return (EINVAL);
1228
1229 return (vmmops_getreg(vcpu->cookie, reg, retval));
1230 }
1231
1232 int
1233 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
1234 {
1235 int error;
1236
1237 if (reg >= VM_REG_LAST)
1238 return (EINVAL);
1239
1240 error = vmmops_setreg(vcpu->cookie, reg, val);
1241 if (error || reg != VM_REG_GUEST_RIP)
1242 return (error);
1243
1244 /* Set 'nextrip' to match the value of %rip */
1245 VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
1246 vcpu->nextrip = val;
1247 return (0);
1248 }
1249
1250 static bool
1251 is_descriptor_table(int reg)
1252 {
1253
1254 switch (reg) {
1255 case VM_REG_GUEST_IDTR:
1256 case VM_REG_GUEST_GDTR:
1257 return (true);
1258 default:
1259 return (false);
1260 }
1261 }
1262
1263 static bool
1264 is_segment_register(int reg)
1265 {
1266
1267 switch (reg) {
1268 case VM_REG_GUEST_ES:
1269 case VM_REG_GUEST_CS:
1270 case VM_REG_GUEST_SS:
1271 case VM_REG_GUEST_DS:
1272 case VM_REG_GUEST_FS:
1273 case VM_REG_GUEST_GS:
1274 case VM_REG_GUEST_TR:
1275 case VM_REG_GUEST_LDTR:
1276 return (true);
1277 default:
1278 return (false);
1279 }
1280 }
1281
1282 int
1283 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
1284 {
1285
1286 if (!is_segment_register(reg) && !is_descriptor_table(reg))
1287 return (EINVAL);
1288
1289 return (vmmops_getdesc(vcpu->cookie, reg, desc));
1290 }
1291
1292 int
1293 vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
1294 {
1295
1296 if (!is_segment_register(reg) && !is_descriptor_table(reg))
1297 return (EINVAL);
1298
1299 return (vmmops_setdesc(vcpu->cookie, reg, desc));
1300 }
1301
1302 static void
1303 restore_guest_fpustate(struct vcpu *vcpu)
1304 {
1305
1306 /* flush host state to the pcb */
1307 fpuexit(curthread);
1308
1309 /* restore guest FPU state */
1310 fpu_enable();
1311 fpurestore(vcpu->guestfpu);
1312
1313 /* restore guest XCR0 if XSAVE is enabled in the host */
1314 if (rcr4() & CR4_XSAVE)
1315 load_xcr(0, vcpu->guest_xcr0);
1316
1317 /*
1318 * The FPU is now "dirty" with the guest's state so disable
1319 * the FPU to trap any access by the host.
1320 */
1321 fpu_disable();
1322 }
1323
1324 static void
1325 save_guest_fpustate(struct vcpu *vcpu)
1326 {
1327
1328 if ((rcr0() & CR0_TS) == 0)
1329 panic("fpu emulation not enabled in host!");
1330
1331 /* save guest XCR0 and restore host XCR0 */
1332 if (rcr4() & CR4_XSAVE) {
1333 vcpu->guest_xcr0 = rxcr(0);
1334 load_xcr(0, vmm_get_host_xcr0());
1335 }
1336
1337 /* save guest FPU state */
1338 fpu_enable();
1339 fpusave(vcpu->guestfpu);
1340 fpu_disable();
1341 }
1342
1343 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
1344
1345 static int
1346 vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
1347 bool from_idle)
1348 {
1349 int error;
1350
1351 vcpu_assert_locked(vcpu);
1352
1353 /*
1354 * State transitions from the vmmdev_ioctl() must always begin from
1355 * the VCPU_IDLE state. This guarantees that there is only a single
1356 * ioctl() operating on a vcpu at any point.
1357 */
1358 if (from_idle) {
1359 while (vcpu->state != VCPU_IDLE) {
1360 vcpu->reqidle = 1;
1361 vcpu_notify_event_locked(vcpu, false);
1362 VMM_CTR1(vcpu, "vcpu state change from %s to "
1363 "idle requested", vcpu_state2str(vcpu->state));
1364 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1365 }
1366 } else {
1367 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1368 "vcpu idle state"));
1369 }
1370
1371 if (vcpu->state == VCPU_RUNNING) {
1372 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1373 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1374 } else {
1375 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1376 "vcpu that is not running", vcpu->hostcpu));
1377 }
1378
1379 /*
1380 * The following state transitions are allowed:
1381 * IDLE -> FROZEN -> IDLE
1382 * FROZEN -> RUNNING -> FROZEN
1383 * FROZEN -> SLEEPING -> FROZEN
1384 */
1385 switch (vcpu->state) {
1386 case VCPU_IDLE:
1387 case VCPU_RUNNING:
1388 case VCPU_SLEEPING:
1389 error = (newstate != VCPU_FROZEN);
1390 break;
1391 case VCPU_FROZEN:
1392 error = (newstate == VCPU_FROZEN);
1393 break;
1394 default:
1395 error = 1;
1396 break;
1397 }
1398
1399 if (error)
1400 return (EBUSY);
1401
1402 VMM_CTR2(vcpu, "vcpu state changed from %s to %s",
1403 vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
1404
1405 vcpu->state = newstate;
1406 if (newstate == VCPU_RUNNING)
1407 vcpu->hostcpu = curcpu;
1408 else
1409 vcpu->hostcpu = NOCPU;
1410
1411 if (newstate == VCPU_IDLE)
1412 wakeup(&vcpu->state);
1413
1414 return (0);
1415 }
1416
1417 static void
1418 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
1419 {
1420 int error;
1421
1422 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0)
1423 panic("Error %d setting state to %d\n", error, newstate);
1424 }
1425
1426 static void
1427 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
1428 {
1429 int error;
1430
1431 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
1432 panic("Error %d setting state to %d", error, newstate);
1433 }
1434
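/*
 * Take part in an active rendezvous: each vcpu in 'rendezvous_req_cpus'
 * invokes the rendezvous function exactly once and marks itself done; the
 * vcpu that completes the set clears the request and wakes the waiters.
 */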
1435 static int
1436 vm_handle_rendezvous(struct vcpu *vcpu)
1437 {
1438 struct vm *vm = vcpu->vm;
1439 struct thread *td;
1440 int error, vcpuid;
1441
1442 error = 0;
1443 vcpuid = vcpu->vcpuid;
1444 td = curthread;
1445 mtx_lock(&vm->rendezvous_mtx);
1446 while (vm->rendezvous_func != NULL) {
1447 /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
1448 CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus);
1449
1450 if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
1451 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
1452 VMM_CTR0(vcpu, "Calling rendezvous func");
1453 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
1454 CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
1455 }
1456 if (CPU_CMP(&vm->rendezvous_req_cpus,
1457 &vm->rendezvous_done_cpus) == 0) {
1458 VMM_CTR0(vcpu, "Rendezvous completed");
1459 CPU_ZERO(&vm->rendezvous_req_cpus);
1460 vm->rendezvous_func = NULL;
1461 wakeup(&vm->rendezvous_func);
1462 break;
1463 }
1464 VMM_CTR0(vcpu, "Wait for rendezvous completion");
1465 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
1466 "vmrndv", hz);
1467 if (td_ast_pending(td, TDA_SUSPEND)) {
1468 mtx_unlock(&vm->rendezvous_mtx);
1469 error = thread_check_susp(td, true);
1470 if (error != 0)
1471 return (error);
1472 mtx_lock(&vm->rendezvous_mtx);
1473 }
1474 }
1475 mtx_unlock(&vm->rendezvous_mtx);
1476 return (0);
1477 }
1478
1479 /*
1480 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
1481 */
1482 static int
1483 vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu)
1484 {
1485 struct vm *vm = vcpu->vm;
1486 const char *wmesg;
1487 struct thread *td;
1488 int error, t, vcpuid, vcpu_halted, vm_halted;
1489
1490 vcpuid = vcpu->vcpuid;
1491 vcpu_halted = 0;
1492 vm_halted = 0;
1493 error = 0;
1494 td = curthread;
1495
1496 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1497
1498 vcpu_lock(vcpu);
1499 while (1) {
1500 /*
1501 * Do a final check for pending NMI or interrupts before
1502 * really putting this thread to sleep. Also check for
1503 * software events that would cause this vcpu to wakeup.
1504 *
1505 * These interrupts/events could have happened after the
1506 * vcpu returned from vmmops_run() and before it acquired the
1507 * vcpu lock above.
1508 */
1509 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1510 break;
1511 if (vm_nmi_pending(vcpu))
1512 break;
1513 if (!intr_disabled) {
1514 if (vm_extint_pending(vcpu) ||
1515 vlapic_pending_intr(vcpu->vlapic, NULL)) {
1516 break;
1517 }
1518 }
1519
1520 /* Don't go to sleep if the vcpu thread needs to yield */
1521 if (vcpu_should_yield(vcpu))
1522 break;
1523
1524 if (vcpu_debugged(vcpu))
1525 break;
1526
1527 /*
1528 * Some Linux guests implement "halt" by having all vcpus
1529 * execute HLT with interrupts disabled. 'halted_cpus' keeps
1530 * track of the vcpus that have entered this state. When all
1531 * vcpus enter the halted state the virtual machine is halted.
1532 */
1533 if (intr_disabled) {
1534 wmesg = "vmhalt";
1535 VMM_CTR0(vcpu, "Halted");
1536 if (!vcpu_halted && halt_detection_enabled) {
1537 vcpu_halted = 1;
1538 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
1539 }
1540 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
1541 vm_halted = 1;
1542 break;
1543 }
1544 } else {
1545 wmesg = "vmidle";
1546 }
1547
1548 t = ticks;
1549 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1550 /*
1551 * XXX msleep_spin() cannot be interrupted by signals so
1552 * wake up periodically to check pending signals.
1553 */
1554 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
1555 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1556 vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
1557 if (td_ast_pending(td, TDA_SUSPEND)) {
1558 vcpu_unlock(vcpu);
1559 error = thread_check_susp(td, false);
1560 if (error != 0) {
1561 if (vcpu_halted) {
1562 CPU_CLR_ATOMIC(vcpuid,
1563 &vm->halted_cpus);
1564 }
1565 return (error);
1566 }
1567 vcpu_lock(vcpu);
1568 }
1569 }
1570
1571 if (vcpu_halted)
1572 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);
1573
1574 vcpu_unlock(vcpu);
1575
1576 if (vm_halted)
1577 vm_suspend(vm, VM_SUSPEND_HALT);
1578
1579 return (0);
1580 }
1581
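/*
 * Handle a nested page fault: first try to emulate accessed/dirty bit
 * updates directly in the nested pmap and, failing that, fall back to a
 * full vm_fault() on the guest's vmspace.
 */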
1582 static int
1583 vm_handle_paging(struct vcpu *vcpu, bool *retu)
1584 {
1585 struct vm *vm = vcpu->vm;
1586 int rv, ftype;
1587 struct vm_map *map;
1588 struct vm_exit *vme;
1589
1590 vme = &vcpu->exitinfo;
1591
1592 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1593 __func__, vme->inst_length));
1594
1595 ftype = vme->u.paging.fault_type;
1596 KASSERT(ftype == VM_PROT_READ ||
1597 ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
1598 ("vm_handle_paging: invalid fault_type %d", ftype));
1599
1600 if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
1601 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
1602 vme->u.paging.gpa, ftype);
1603 if (rv == 0) {
1604 VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
1605 ftype == VM_PROT_READ ? "accessed" : "dirty",
1606 vme->u.paging.gpa);
1607 goto done;
1608 }
1609 }
1610
1611 map = &vm->vmspace->vm_map;
1612 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
1613
1614 VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
1615 "ftype = %d", rv, vme->u.paging.gpa, ftype);
1616
1617 if (rv != KERN_SUCCESS)
1618 return (EFAULT);
1619 done:
1620 return (0);
1621 }
1622
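/*
 * Emulate an instruction that faulted on an MMIO access: fetch and decode
 * it if necessary, then emulate accesses to the in-kernel device models
 * (local APIC, IOAPIC, HPET) or bounce the exit to userspace for anything
 * else.
 */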
1623 static int
1624 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu)
1625 {
1626 struct vie *vie;
1627 struct vm_exit *vme;
1628 uint64_t gla, gpa, cs_base;
1629 struct vm_guest_paging *paging;
1630 mem_region_read_t mread;
1631 mem_region_write_t mwrite;
1632 enum vm_cpu_mode cpu_mode;
1633 int cs_d, error, fault;
1634
1635 vme = &vcpu->exitinfo;
1636
1637 KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
1638 __func__, vme->inst_length));
1639
1640 gla = vme->u.inst_emul.gla;
1641 gpa = vme->u.inst_emul.gpa;
1642 cs_base = vme->u.inst_emul.cs_base;
1643 cs_d = vme->u.inst_emul.cs_d;
1644 vie = &vme->u.inst_emul.vie;
1645 paging = &vme->u.inst_emul.paging;
1646 cpu_mode = paging->cpu_mode;
1647
1648 VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa);
1649
1650 /* Fetch, decode and emulate the faulting instruction */
1651 if (vie->num_valid == 0) {
1652 error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
1653 VIE_INST_SIZE, vie, &fault);
1654 } else {
1655 /*
1656 * The instruction bytes have already been copied into 'vie'
1657 */
1658 error = fault = 0;
1659 }
1660 if (error || fault)
1661 return (error);
1662
1663 if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
1664 VMM_CTR1(vcpu, "Error decoding instruction at %#lx",
1665 vme->rip + cs_base);
1666 *retu = true; /* dump instruction bytes in userspace */
1667 return (0);
1668 }
1669
1670 /*
1671 * Update 'nextrip' based on the length of the emulated instruction.
1672 */
1673 vme->inst_length = vie->num_processed;
1674 vcpu->nextrip += vie->num_processed;
1675 VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding",
1676 vcpu->nextrip);
1677
1678 /* return to userland unless this is an in-kernel emulated device */
1679 if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
1680 mread = lapic_mmio_read;
1681 mwrite = lapic_mmio_write;
1682 } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
1683 mread = vioapic_mmio_read;
1684 mwrite = vioapic_mmio_write;
1685 } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
1686 mread = vhpet_mmio_read;
1687 mwrite = vhpet_mmio_write;
1688 } else {
1689 *retu = true;
1690 return (0);
1691 }
1692
1693 error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
1694 retu);
1695
1696 return (error);
1697 }
1698
1699 static int
1700 vm_handle_suspend(struct vcpu *vcpu, bool *retu)
1701 {
1702 struct vm *vm = vcpu->vm;
1703 int error, i;
1704 struct thread *td;
1705
1706 error = 0;
1707 td = curthread;
1708
1709 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);
1710
1711 /*
1712 * Wait until all 'active_cpus' have suspended themselves.
1713 *
1714 * Since a VM may be suspended at any time including when one or
1715 * more vcpus are doing a rendezvous we need to call the rendezvous
1716 * handler while we are waiting to prevent a deadlock.
1717 */
1718 vcpu_lock(vcpu);
1719 while (error == 0) {
1720 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
1721 VMM_CTR0(vcpu, "All vcpus suspended");
1722 break;
1723 }
1724
1725 if (vm->rendezvous_func == NULL) {
1726 VMM_CTR0(vcpu, "Sleeping during suspend");
1727 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1728 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1729 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1730 if (td_ast_pending(td, TDA_SUSPEND)) {
1731 vcpu_unlock(vcpu);
1732 error = thread_check_susp(td, false);
1733 vcpu_lock(vcpu);
1734 }
1735 } else {
1736 VMM_CTR0(vcpu, "Rendezvous during suspend");
1737 vcpu_unlock(vcpu);
1738 error = vm_handle_rendezvous(vcpu);
1739 vcpu_lock(vcpu);
1740 }
1741 }
1742 vcpu_unlock(vcpu);
1743
1744 /*
1745 * Wakeup the other sleeping vcpus and return to userspace.
1746 */
1747 for (i = 0; i < vm->maxcpus; i++) {
1748 if (CPU_ISSET(i, &vm->suspended_cpus)) {
1749 vcpu_notify_event(vm_vcpu(vm, i), false);
1750 }
1751 }
1752
1753 *retu = true;
1754 return (error);
1755 }
1756
1757 static int
1758 vm_handle_reqidle(struct vcpu *vcpu, bool *retu)
1759 {
1760 vcpu_lock(vcpu);
1761 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1762 vcpu->reqidle = 0;
1763 vcpu_unlock(vcpu);
1764 *retu = true;
1765 return (0);
1766 }
1767
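/*
 * Handle a #DB exit taken while single-stepping a guest.  If a PUSHF was
 * intercepted, the RFLAGS image pushed on the guest stack contains the
 * hypervisor-owned trap flag; clear PSL_T in that image so the guest does
 * not observe it.
 */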
1768 static int
1769 vm_handle_db(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
1770 {
1771 int error, fault;
1772 uint64_t rsp;
1773 uint64_t rflags;
1774 struct vm_copyinfo copyinfo;
1775
1776 *retu = true;
1777 if (!vme->u.dbg.pushf_intercept || vme->u.dbg.tf_shadow_val != 0) {
1778 return (0);
1779 }
1780
1781 vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp);
1782 error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t),
1783 VM_PROT_RW, &copyinfo, 1, &fault);
1784 if (error != 0 || fault != 0) {
1785 *retu = false;
1786 return (EINVAL);
1787 }
1788
1789 /* Read pushed rflags value from top of stack. */
1790 vm_copyin(&copyinfo, &rflags, sizeof(uint64_t));
1791
1792 /* Clear TF bit. */
1793 rflags &= ~(PSL_T);
1794
1795 /* Write updated value back to memory. */
1796 vm_copyout(&rflags, &copyinfo, sizeof(uint64_t));
1797 vm_copy_teardown(&copyinfo, 1);
1798
1799 return (0);
1800 }
1801
1802 int
1803 vm_suspend(struct vm *vm, enum vm_suspend_how how)
1804 {
1805 int i;
1806
1807 if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
1808 return (EINVAL);
1809
1810 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
1811 VM_CTR2(vm, "virtual machine already suspended %d/%d",
1812 vm->suspend, how);
1813 return (EALREADY);
1814 }
1815
1816 VM_CTR1(vm, "virtual machine successfully suspended %d", how);
1817
1818 /*
1819 * Notify all active vcpus that they are now suspended.
1820 */
1821 for (i = 0; i < vm->maxcpus; i++) {
1822 if (CPU_ISSET(i, &vm->active_cpus))
1823 vcpu_notify_event(vm_vcpu(vm, i), false);
1824 }
1825
1826 return (0);
1827 }
1828
1829 void
1830 vm_exit_suspended(struct vcpu *vcpu, uint64_t rip)
1831 {
1832 struct vm *vm = vcpu->vm;
1833 struct vm_exit *vmexit;
1834
1835 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
1836 ("vm_exit_suspended: invalid suspend type %d", vm->suspend));
1837
1838 vmexit = vm_exitinfo(vcpu);
1839 vmexit->rip = rip;
1840 vmexit->inst_length = 0;
1841 vmexit->exitcode = VM_EXITCODE_SUSPENDED;
1842 vmexit->u.suspended.how = vm->suspend;
1843 }
1844
1845 void
1846 vm_exit_debug(struct vcpu *vcpu, uint64_t rip)
1847 {
1848 struct vm_exit *vmexit;
1849
1850 vmexit = vm_exitinfo(vcpu);
1851 vmexit->rip = rip;
1852 vmexit->inst_length = 0;
1853 vmexit->exitcode = VM_EXITCODE_DEBUG;
1854 }
1855
1856 void
1857 vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip)
1858 {
1859 struct vm_exit *vmexit;
1860
1861 vmexit = vm_exitinfo(vcpu);
1862 vmexit->rip = rip;
1863 vmexit->inst_length = 0;
1864 vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
1865 vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1);
1866 }
1867
1868 void
1869 vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip)
1870 {
1871 struct vm_exit *vmexit;
1872
1873 vmexit = vm_exitinfo(vcpu);
1874 vmexit->rip = rip;
1875 vmexit->inst_length = 0;
1876 vmexit->exitcode = VM_EXITCODE_REQIDLE;
1877 vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1);
1878 }
1879
1880 void
1881 vm_exit_astpending(struct vcpu *vcpu, uint64_t rip)
1882 {
1883 struct vm_exit *vmexit;
1884
1885 vmexit = vm_exitinfo(vcpu);
1886 vmexit->rip = rip;
1887 vmexit->inst_length = 0;
1888 vmexit->exitcode = VM_EXITCODE_BOGUS;
1889 vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1);
1890 }
1891
1892 int
1893 vm_run(struct vcpu *vcpu)
1894 {
1895 struct vm *vm = vcpu->vm;
1896 struct vm_eventinfo evinfo;
1897 int error, vcpuid;
1898 struct pcb *pcb;
1899 uint64_t tscval;
1900 struct vm_exit *vme;
1901 bool retu, intr_disabled;
1902 pmap_t pmap;
1903
1904 vcpuid = vcpu->vcpuid;
1905
1906 if (!CPU_ISSET(vcpuid, &vm->active_cpus))
1907 return (EINVAL);
1908
1909 if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
1910 return (EINVAL);
1911
1912 pmap = vmspace_pmap(vm->vmspace);
1913 vme = &vcpu->exitinfo;
1914 evinfo.rptr = &vm->rendezvous_req_cpus;
1915 evinfo.sptr = &vm->suspend;
1916 evinfo.iptr = &vcpu->reqidle;
1917 restart:
1918 critical_enter();
1919
1920 KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
1921 ("vm_run: absurd pm_active"));
1922
1923 tscval = rdtsc();
1924
1925 pcb = PCPU_GET(curpcb);
1926 set_pcb_flags(pcb, PCB_FULL_IRET);
1927
1928 restore_guest_fpustate(vcpu);
1929
1930 vcpu_require_state(vcpu, VCPU_RUNNING);
1931 error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
1932 vcpu_require_state(vcpu, VCPU_FROZEN);
1933
1934 save_guest_fpustate(vcpu);
1935
1936 vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1937
1938 critical_exit();
1939
1940 if (error == 0) {
1941 retu = false;
1942 vcpu->nextrip = vme->rip + vme->inst_length;
1943 switch (vme->exitcode) {
1944 case VM_EXITCODE_REQIDLE:
1945 error = vm_handle_reqidle(vcpu, &retu);
1946 break;
1947 case VM_EXITCODE_SUSPENDED:
1948 error = vm_handle_suspend(vcpu, &retu);
1949 break;
1950 case VM_EXITCODE_IOAPIC_EOI:
1951 vioapic_process_eoi(vm, vme->u.ioapic_eoi.vector);
1952 break;
1953 case VM_EXITCODE_RENDEZVOUS:
1954 error = vm_handle_rendezvous(vcpu);
1955 break;
1956 case VM_EXITCODE_HLT:
1957 intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
1958 error = vm_handle_hlt(vcpu, intr_disabled, &retu);
1959 break;
1960 case VM_EXITCODE_PAGING:
1961 error = vm_handle_paging(vcpu, &retu);
1962 break;
1963 case VM_EXITCODE_INST_EMUL:
1964 error = vm_handle_inst_emul(vcpu, &retu);
1965 break;
1966 case VM_EXITCODE_INOUT:
1967 case VM_EXITCODE_INOUT_STR:
1968 error = vm_handle_inout(vcpu, vme, &retu);
1969 break;
1970 case VM_EXITCODE_DB:
1971 error = vm_handle_db(vcpu, vme, &retu);
1972 break;
1973 case VM_EXITCODE_MONITOR:
1974 case VM_EXITCODE_MWAIT:
1975 case VM_EXITCODE_VMINSN:
1976 vm_inject_ud(vcpu);
1977 break;
1978 default:
1979 retu = true; /* handled in userland */
1980 break;
1981 }
1982 }
1983
1984 /*
1985 * Handling VM_EXITCODE_INST_EMUL may access the local APIC, which can
1986 * transform the exit code into VM_EXITCODE_IPI.
1987 */
1988 if (error == 0 && vme->exitcode == VM_EXITCODE_IPI)
1989 error = vm_handle_ipi(vcpu, vme, &retu);
1990
1991 if (error == 0 && retu == false)
1992 goto restart;
1993
1994 vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
1995 VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode);
1996
1997 return (error);
1998 }
1999
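/*
 * Usage sketch (illustrative, not a prescribed interface): vm_run() keeps
 * re-entering the guest on its own for exits it can service in the kernel
 * and returns only when an exit needs userspace attention or an error
 * occurs.  The per-vcpu loop driven from the bhyve process (via the VM_RUN
 * ioctl path) is therefore expected to look roughly like:
 *
 *	while ((error = vm_run(vcpu)) == 0) {
 *		(void)vm_exitinfo(vcpu);  // emulate the exit in userspace,
 *					  // then re-enter the guest
 *	}
 *
 * where the emulation step stands in for bhyve's device models.
 */
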
int
vm_restart_instruction(struct vcpu *vcpu)
{
	enum vcpu_state state;
	uint64_t rip;
	int error __diagused;

	state = vcpu_get_state(vcpu, NULL);
	if (state == VCPU_RUNNING) {
		/*
		 * When a vcpu is "running" the next instruction is determined
		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
		 * Thus setting 'inst_length' to zero will cause the current
		 * instruction to be restarted.
		 */
		vcpu->exitinfo.inst_length = 0;
		VMM_CTR1(vcpu, "restarting instruction at %#lx by "
		    "setting inst_length to zero", vcpu->exitinfo.rip);
	} else if (state == VCPU_FROZEN) {
		/*
		 * When a vcpu is "frozen" it is outside the critical section
		 * around vmmops_run() and 'nextrip' points to the next
		 * instruction. Thus instruction restart is achieved by setting
		 * 'nextrip' to the vcpu's %rip.
		 */
		error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
		VMM_CTR2(vcpu, "restarting instruction by updating "
		    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
		vcpu->nextrip = rip;
	} else {
		panic("%s: invalid state %d", __func__, state);
	}
	return (0);
}

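/*
 * Note on vm_restart_instruction(): exception injection is its typical
 * consumer.  For example, when vm_inject_fault() below queues a #PF it asks
 * for a restart so that the guest's %rip still points at the faulting
 * instruction when the exception is delivered, and the instruction is
 * re-executed once the guest's handler returns.
 */
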
int
vm_exit_intinfo(struct vcpu *vcpu, uint64_t info)
{
	int type, vector;

	if (info & VM_INTINFO_VALID) {
		type = info & VM_INTINFO_TYPE;
		vector = info & 0xff;
		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info);
	vcpu->exitintinfo = info;
	return (0);
}

enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};

#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
	int type, vector;

	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
	type = info & VM_INTINFO_TYPE;
	vector = info & 0xff;

	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (type) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
		 * for exceptions except #BP and #OF. #BP and #OF use a type
		 * value of '5' or '6'. Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}

	switch (vector) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

static int
nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
	enum exc_class exc1, exc2;
	int type1, vector1;

	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

	/*
	 * If an exception occurs while attempting to call the double-fault
	 * handler the processor enters shutdown mode (aka triple fault).
	 */
	type1 = info1 & VM_INTINFO_TYPE;
	vector1 = info1 & 0xff;
	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
		VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)",
		    info1, info2);
		vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT);
		*retinfo = 0;
		return (0);
	}

	/*
	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol 3
	 */
	exc1 = exception_class(info1);
	exc2 = exception_class(info2);
	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
		/* Convert nested fault into a double fault. */
		*retinfo = IDT_DF;
		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		*retinfo |= VM_INTINFO_DEL_ERRCODE;
	} else {
		/* Handle exceptions serially */
		*retinfo = info2;
	}
	return (1);
}

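/*
 * Worked example for nested_fault(): if a #GP (contributory) is raised
 * while a #PF (page-fault class) is being delivered, the two collapse into
 * a #DF with a zero error code per the SDM table cited above.  If yet
 * another exception is raised while that #DF is being delivered, 'info1'
 * carries IDT_DF and the VM is suspended with VM_SUSPEND_TRIPLEFAULT
 * instead of injecting anything.
 */
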
static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
	uint64_t info = 0;

	if (vcpu->exception_pending) {
		info = vcpu->exc_vector & 0xff;
		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		if (vcpu->exc_errcode_valid) {
			info |= VM_INTINFO_DEL_ERRCODE;
			info |= (uint64_t)vcpu->exc_errcode << 32;
		}
	}
	return (info);
}

int
vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo)
{
	uint64_t info1, info2;
	int valid;

	info1 = vcpu->exitintinfo;
	vcpu->exitintinfo = 0;

	info2 = 0;
	if (vcpu->exception_pending) {
		info2 = vcpu_exception_intinfo(vcpu);
		vcpu->exception_pending = 0;
		VMM_CTR2(vcpu, "Exception %d delivered: %#lx",
		    vcpu->exc_vector, info2);
	}

	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
		valid = nested_fault(vcpu, info1, info2, retinfo);
	} else if (info1 & VM_INTINFO_VALID) {
		*retinfo = info1;
		valid = 1;
	} else if (info2 & VM_INTINFO_VALID) {
		*retinfo = info2;
		valid = 1;
	} else {
		valid = 0;
	}

	if (valid) {
		VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), "
		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
	}

	return (valid);
}

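/*
 * Note on vm_entry_intinfo(): the CPU backends are expected to call it just
 * before entering the guest.  It consumes both 'exitintinfo' (an event that
 * was in flight when the last exit occurred) and any exception queued by
 * vm_inject_exception(), merging the two via nested_fault() when both are
 * valid so that at most one event is handed to the hardware per VM entry.
 */
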
int
vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
{
	*info1 = vcpu->exitintinfo;
	*info2 = vcpu_exception_intinfo(vcpu);
	return (0);
}

int
vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	uint64_t regval;
	int error __diagused;

	if (vector < 0 || vector >= 32)
		return (EINVAL);

	/*
	 * A double fault exception should never be injected directly into
	 * the guest. It is a derived exception that results from specific
	 * combinations of nested faults.
	 */
	if (vector == IDT_DF)
		return (EINVAL);

	if (vcpu->exception_pending) {
		VMM_CTR2(vcpu, "Unable to inject exception %d due to "
		    "pending exception %d", vector, vcpu->exc_vector);
		return (EBUSY);
	}

	if (errcode_valid) {
		/*
		 * Exceptions don't deliver an error code in real mode.
		 */
		error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &regval);
		KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
		if (!(regval & CR0_PE))
			errcode_valid = 0;
	}

	/*
	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
	 *
	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
	 * one instruction or incurs an exception.
	 */
	error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
	    __func__, error));

	if (restart_instruction)
		vm_restart_instruction(vcpu);

	vcpu->exception_pending = 1;
	vcpu->exc_vector = vector;
	vcpu->exc_errcode = errcode;
	vcpu->exc_errcode_valid = errcode_valid;
	VMM_CTR1(vcpu, "Exception %d pending", vector);
	return (0);
}

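/*
 * Usage note for vm_inject_exception(): most callers go through convenience
 * wrappers (e.g. vm_inject_ud() and vm_inject_gp() in the vmm headers) or
 * vm_inject_fault() below, which request an instruction restart so the
 * exception appears to the guest as if it were raised by the current
 * instruction.
 */
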
void
vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode)
{
	int error __diagused, restart_instruction;

	restart_instruction = 1;

	error = vm_inject_exception(vcpu, vector, errcode_valid,
	    errcode, restart_instruction);
	KASSERT(error == 0, ("vm_inject_exception error %d", error));
}

void
vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2)
{
	int error __diagused;

	VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx",
	    error_code, cr2);

	error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

	vm_inject_fault(vcpu, IDT_PF, 1, error_code);
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vcpu *vcpu)
{

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vcpu, false);
	return (0);
}

int
vm_nmi_pending(struct vcpu *vcpu)
{
	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vcpu *vcpu)
{
	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vcpu *vcpu)
{

	vcpu->extint_pending = 1;
	vcpu_notify_event(vcpu, false);
	return (0);
}

int
vm_extint_pending(struct vcpu *vcpu)
{
	return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vcpu *vcpu)
{
	if (vcpu->extint_pending == 0)
		panic("vm_extint_clear: inconsistent extint_pending state");

	vcpu->extint_pending = 0;
	vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1);
}

int
vm_get_capability(struct vcpu *vcpu, int type, int *retval)
{
	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (vmmops_getcap(vcpu->cookie, type, retval));
}

int
vm_set_capability(struct vcpu *vcpu, int type, int val)
{
	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (vmmops_setcap(vcpu->cookie, type, val));
}

struct vm *
vcpu_vm(struct vcpu *vcpu)
{
	return (vcpu->vm);
}

int
vcpu_vcpuid(struct vcpu *vcpu)
{
	return (vcpu->vcpuid);
}

struct vcpu *
vm_vcpu(struct vm *vm, int vcpuid)
{
	return (vm->vcpu[vcpuid]);
}

struct vlapic *
vm_lapic(struct vcpu *vcpu)
{
	return (vcpu->vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}

bool
vmm_is_pptdev(int bus, int slot, int func)
{
	int b, f, i, n, s;
	char *val, *cp, *cp2;
	bool found;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = false;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = kern_getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = true;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

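/*
 * Example tunables for vmm_is_pptdev() (illustrative): a host dedicating
 * two PCI functions to guests could set
 *
 *	pptdevs="2/0/0 4/0/0"
 *
 * in loader.conf, spilling further bus/slot/function triples into
 * pptdevs2/pptdevs3 if the 128-byte limit mentioned above is hit.
 */
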
void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
{
	int error;

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
{
	enum vcpu_state state;

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

int
vm_activate_cpu(struct vcpu *vcpu)
{
	struct vm *vm = vcpu->vm;

	if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
		return (EBUSY);

	VMM_CTR0(vcpu, "activated");
	CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
	return (0);
}

int
vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
{
	if (vcpu == NULL) {
		vm->debug_cpus = vm->active_cpus;
		for (int i = 0; i < vm->maxcpus; i++) {
			if (CPU_ISSET(i, &vm->active_cpus))
				vcpu_notify_event(vm_vcpu(vm, i), false);
		}
	} else {
		if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
			return (EINVAL);

		CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
		vcpu_notify_event(vcpu, false);
	}
	return (0);
}

int
vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
{

	if (vcpu == NULL) {
		CPU_ZERO(&vm->debug_cpus);
	} else {
		if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
			return (EINVAL);

		CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
	}
	return (0);
}

int
vcpu_debugged(struct vcpu *vcpu)
{

	return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

cpuset_t
vm_debug_cpus(struct vm *vm)
{

	return (vm->debug_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

	return (vm->suspended_cpus);
}

/*
 * Returns the subset of vCPUs in tostart that are awaiting startup.
 * These vCPUs are also marked as no longer awaiting startup.
 */
cpuset_t
vm_start_cpus(struct vm *vm, const cpuset_t *tostart)
{
	cpuset_t set;

	mtx_lock(&vm->rendezvous_mtx);
	CPU_AND(&set, &vm->startup_cpus, tostart);
	CPU_ANDNOT(&vm->startup_cpus, &vm->startup_cpus, &set);
	mtx_unlock(&vm->rendezvous_mtx);
	return (set);
}

void
vm_await_start(struct vm *vm, const cpuset_t *waiting)
{
	mtx_lock(&vm->rendezvous_mtx);
	CPU_OR(&vm->startup_cpus, &vm->startup_cpus, waiting);
	mtx_unlock(&vm->rendezvous_mtx);
}

void *
vcpu_stats(struct vcpu *vcpu)
{

	return (vcpu->stats);
}

int
vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
{
	*state = vcpu->x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
{
	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vcpu->x2apic_state = state;

	vlapic_set_x2apic_state(vcpu, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be
 *   directed to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
static void
vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
{
	int hostcpu;

	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
}

void
vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
{
	vcpu_lock(vcpu);
	vcpu_notify_event_locked(vcpu, lapic_intr);
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}

int
vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	struct vm *vm = vcpu->vm;
	int error, i;

	/*
	 * Enforce that this function is called without any locks
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");

restart:
	mtx_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpu' is one
		 * of the targets of the rendezvous.
		 */
		VMM_CTR0(vcpu, "Rendezvous already in progress");
		mtx_unlock(&vm->rendezvous_mtx);
		error = vm_handle_rendezvous(vcpu);
		if (error != 0)
			return (error);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
	    "rendezvous is still in progress"));

	VMM_CTR0(vcpu, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm->rendezvous_func = func;
	mtx_unlock(&vm->rendezvous_mtx);

	/*
	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
	 * vcpus so they handle the rendezvous as soon as possible.
	 */
	for (i = 0; i < vm->maxcpus; i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm_vcpu(vm, i), false);
	}

	return (vm_handle_rendezvous(vcpu));
}

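/*
 * Usage note for vm_smp_rendezvous(): the caller supplies the set of target
 * vcpus and a callback.  Each target runs 'func' once it notices the
 * rendezvous (either by waking from sleep or by exiting the guest due to
 * the notification above), and the initiating vcpu participates through
 * vm_handle_rendezvous(), which returns only after every target has
 * checked in.
 */
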
struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{

	return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{

	return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo)
{
	int idx;

	for (idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL)
			vm_gpa_release(copyinfo[idx].cookie);
	}
	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

int
vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *fault)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva, *cookie;
	uint64_t gpa;

	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);
		off = gpa & PAGE_MASK;
		n = min(remaining, PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa,
		    copyinfo[idx].len, prot, &cookie);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
		copyinfo[idx].cookie = cookie;
	}

	if (idx != nused) {
		vm_copy_teardown(copyinfo, num_copyinfo);
		return (EFAULT);
	} else {
		*fault = 0;
		return (0);
	}
}

void
vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}

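/*
 * Usage sketch for the copy helpers above (illustrative; 'buf' and the
 * sizing of 'copyinfo' are the caller's own choices): an emulation path
 * that needs to read 'len' bytes at guest linear address 'gla' would do
 * roughly
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int fault;
 *
 *	if (vm_copy_setup(vcpu, paging, gla, len, VM_PROT_READ, copyinfo,
 *	    nitems(copyinfo), &fault) == 0 && !fault) {
 *		vm_copyin(copyinfo, buf, len);
 *		vm_copy_teardown(copyinfo, nitems(copyinfo));
 *	}
 *
 * with vm_copyout() used instead for the write direction.
 */
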
/*
 * Return the amount of in-use and wired memory for the VM. Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
{

	if (vcpu->vcpuid == 0) {
		vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
		    vmspace_resident_count(vcpu->vm->vmspace));
	}
}

static void
vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
{

	if (vcpu->vcpuid == 0) {
		vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
		    pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);

#ifdef BHYVE_SNAPSHOT
static int
vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta)
{
	uint64_t tsc, now;
	int ret;
	struct vcpu *vcpu;
	uint16_t i, maxcpus;

	now = rdtsc();
	maxcpus = vm_get_maxcpus(vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = vm->vcpu[i];
		if (vcpu == NULL)
			continue;

		SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done);
		SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);

		/*
		 * Save the absolute TSC value by adding now to tsc_offset.
		 *
		 * It will be turned back into an actual offset when the
		 * TSC restore function is called.
		 */
		tsc = now + vcpu->tsc_offset;
		SNAPSHOT_VAR_OR_LEAVE(tsc, meta, ret, done);
		if (meta->op == VM_SNAPSHOT_RESTORE)
			vcpu->tsc_offset = tsc;
	}

done:
	return (ret);
}

static int
vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta)
{
	int ret;

	ret = vm_snapshot_vcpus(vm, meta);
	if (ret != 0)
		goto done;

	SNAPSHOT_VAR_OR_LEAVE(vm->startup_cpus, meta, ret, done);
done:
	return (ret);
}

static int
vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta)
{
	int error;
	struct vcpu *vcpu;
	uint16_t i, maxcpus;

	error = 0;

	maxcpus = vm_get_maxcpus(vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = vm->vcpu[i];
		if (vcpu == NULL)
			continue;

		error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
		if (error != 0) {
			printf("%s: failed to snapshot vmcs/vmcb data for "
			    "vCPU: %d; error: %d\n", __func__, i, error);
			goto done;
		}
	}

done:
	return (error);
}

/*
 * Save kernel-side structures to user-space for snapshotting.
 */
int
vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta)
{
	int ret = 0;

	switch (meta->dev_req) {
	case STRUCT_VMCX:
		ret = vm_snapshot_vcpu(vm, meta);
		break;
	case STRUCT_VM:
		ret = vm_snapshot_vm(vm, meta);
		break;
	case STRUCT_VIOAPIC:
		ret = vioapic_snapshot(vm_ioapic(vm), meta);
		break;
	case STRUCT_VLAPIC:
		ret = vlapic_snapshot(vm, meta);
		break;
	case STRUCT_VHPET:
		ret = vhpet_snapshot(vm_hpet(vm), meta);
		break;
	case STRUCT_VATPIC:
		ret = vatpic_snapshot(vm_atpic(vm), meta);
		break;
	case STRUCT_VATPIT:
		ret = vatpit_snapshot(vm_atpit(vm), meta);
		break;
	case STRUCT_VPMTMR:
		ret = vpmtmr_snapshot(vm_pmtmr(vm), meta);
		break;
	case STRUCT_VRTC:
		ret = vrtc_snapshot(vm_rtc(vm), meta);
		break;
	default:
		printf("%s: failed to find the requested type %#x\n",
		    __func__, meta->dev_req);
		ret = (EINVAL);
	}
	return (ret);
}

void
vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset)
{
	vcpu->tsc_offset = offset;
}

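/*
 * Undo the transformation applied by vm_snapshot_vcpus(): the snapshot
 * stored the absolute TSC value ("TSC at save time" + offset), so
 * subtracting the host's current TSC below yields a fresh offset that lets
 * the guest's TSC continue from where it left off.
 */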
int
vm_restore_time(struct vm *vm)
{
	int error;
	uint64_t now;
	struct vcpu *vcpu;
	uint16_t i, maxcpus;

	now = rdtsc();

	error = vhpet_restore_time(vm_hpet(vm));
	if (error)
		return (error);

	maxcpus = vm_get_maxcpus(vm);
	for (i = 0; i < maxcpus; i++) {
		vcpu = vm->vcpu[i];
		if (vcpu == NULL)
			continue;

		error = vmmops_restore_tsc(vcpu->cookie,
		    vcpu->tsc_offset - now);
		if (error)
			return (error);
	}

	return (0);
}
#endif