/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _VMM_H_
#define	_VMM_H_

#include <sys/cpuset.h>
#include <sys/sdt.h>
#include <x86/segments.h>

struct vcpu;
struct vm_snapshot_meta;

#ifdef _KERNEL
SDT_PROVIDER_DECLARE(vmm);
#endif

enum vm_suspend_how {
	VM_SUSPEND_NONE,
	VM_SUSPEND_RESET,
	VM_SUSPEND_POWEROFF,
	VM_SUSPEND_HALT,
	VM_SUSPEND_TRIPLEFAULT,
	VM_SUSPEND_LAST
};

/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_GUEST_CR2,
	VM_REG_GUEST_PDPTE0,
	VM_REG_GUEST_PDPTE1,
	VM_REG_GUEST_PDPTE2,
	VM_REG_GUEST_PDPTE3,
	VM_REG_GUEST_INTR_SHADOW,
	VM_REG_GUEST_DR0,
	VM_REG_GUEST_DR1,
	VM_REG_GUEST_DR2,
	VM_REG_GUEST_DR3,
	VM_REG_GUEST_DR6,
	VM_REG_GUEST_ENTRY_INST_LENGTH,
	VM_REG_GUEST_FS_BASE,
	VM_REG_GUEST_GS_BASE,
	VM_REG_GUEST_KGS_BASE,
	VM_REG_GUEST_TPR,
	VM_REG_LAST
};

enum x2apic_state {
	X2APIC_DISABLED,
	X2APIC_ENABLED,
	X2APIC_STATE_LAST
};

#define	VM_INTINFO_VECTOR(info)	((info) & 0xff)
#define	VM_INTINFO_DEL_ERRCODE	0x800
#define	VM_INTINFO_RSVD		0x7ffff000
#define	VM_INTINFO_VALID	0x80000000
#define	VM_INTINFO_TYPE		0x700
#define	VM_INTINFO_HWINTR	(0 << 8)
#define	VM_INTINFO_NMI		(2 << 8)
#define	VM_INTINFO_HWEXCEPTION	(3 << 8)
#define	VM_INTINFO_SWINTR	(4 << 8)

/*
 * The VM name has to fit into the pathname length constraints of devfs,
 * governed primarily by SPECNAMELEN.  The length is the total number of
 * characters in the full path, relative to the mount point and not
 * including any leading '/' characters.
 * A prefix and a suffix are added to the name specified by the user.
 * The prefix is usually "vmm/" or "vmm.io/", but can be a few characters
 * longer for future use.
 * The suffix is a string that identifies a bootrom image or some similar
 * image that is attached to the VM.  A separator character gets added to
 * the suffix automatically when generating the full path, so it must be
 * accounted for, reducing the effective length by 1.
 * The effective length of a VM name is 229 bytes for FreeBSD 13 and 37
 * bytes for FreeBSD 12.  A minimum length is set for safety and supports
 * a SPECNAMELEN as small as 32 on old systems.
 */
#define	VM_MAX_PREFIXLEN 10
#define	VM_MAX_SUFFIXLEN 15
#define	VM_MIN_NAMELEN	 6
#define	VM_MAX_NAMELEN \
    (SPECNAMELEN - VM_MAX_PREFIXLEN - VM_MAX_SUFFIXLEN - 1)

#ifdef _KERNEL
CTASSERT(VM_MAX_NAMELEN >= VM_MIN_NAMELEN);

struct vm;
struct vm_exception;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_object;
struct vm_guest_paging;
struct pmap;
enum snapshot_req;

struct vm_eventinfo {
	cpuset_t *rptr;		/* rendezvous cookie */
	int	*sptr;		/* suspend cookie */
	int	*iptr;		/* reqidle cookie */
};

typedef int	(*vmm_init_func_t)(int ipinum);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
typedef int	(*vmi_run_func_t)(void *vcpui, register_t rip,
		    struct pmap *pmap, struct vm_eventinfo *info);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef void *	(*vmi_vcpu_init_func_t)(void *vmi, struct vcpu *vcpu,
		    int vcpu_id);
typedef void	(*vmi_vcpu_cleanup_func_t)(void *vcpui);
typedef int	(*vmi_get_register_t)(void *vcpui, int num, uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vcpui, int num, uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vcpui, int num, struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vcpui, int num, struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vcpui, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vcpui, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void	(*vmi_vmspace_free)(struct vmspace *vmspace);
typedef struct vlapic * (*vmi_vlapic_init)(void *vcpui);
typedef void	(*vmi_vlapic_cleanup)(struct vlapic *vlapic);
typedef int	(*vmi_snapshot_vcpu_t)(void *vcpui, struct vm_snapshot_meta *meta);
typedef int	(*vmi_restore_tsc_t)(void *vcpui, uint64_t now);

struct vmm_ops {
	vmm_init_func_t		modinit;	/* module wide initialization */
	vmm_cleanup_func_t	modcleanup;
	vmm_resume_func_t	modresume;

	vmi_init_func_t		init;		/* vm-specific initialization */
	vmi_run_func_t		run;
	vmi_cleanup_func_t	cleanup;
	vmi_vcpu_init_func_t	vcpu_init;
	vmi_vcpu_cleanup_func_t	vcpu_cleanup;
	vmi_get_register_t	getreg;
	vmi_set_register_t	setreg;
	vmi_get_desc_t		getdesc;
	vmi_set_desc_t		setdesc;
	vmi_get_cap_t		getcap;
	vmi_set_cap_t		setcap;
	vmi_vmspace_alloc	vmspace_alloc;
	vmi_vmspace_free	vmspace_free;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;

	/* checkpoint operations */
	vmi_snapshot_vcpu_t	vcpu_snapshot;
	vmi_restore_tsc_t	restore_tsc;
};

extern const struct vmm_ops vmm_ops_intel;
extern const struct vmm_ops vmm_ops_amd;
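
/*
 * Illustrative sketch (not compiled): the shape of a backend's ops table.
 * The 'example_*' functions are hypothetical; the real tables are the
 * vmm_ops_intel and vmm_ops_amd objects declared above.
 *
 *	const struct vmm_ops vmm_ops_example = {
 *		.modinit = example_modinit,
 *		.modcleanup = example_modcleanup,
 *		.init = example_vminit,
 *		.run = example_vmrun,
 *		.cleanup = example_vmcleanup,
 *		(remaining handlers filled in similarly)
 *	};
 */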

extern u_int vm_maxcpu;			/* maximum virtual cpus */

int vm_create(const char *name, struct vm **retvm);
struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
void vm_disable_vcpu_creation(struct vm *vm);
void vm_slock_vcpus(struct vm *vm);
void vm_unlock_vcpus(struct vm *vm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
uint16_t vm_get_maxcpus(struct vm *vm);
void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus);
int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus);
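
/*
 * Illustrative sketch (not compiled): creating a VM and setting a
 * 2-socket, 2-core, 1-thread topology.  The name "testvm" is made up
 * and error handling is elided.
 *
 *	struct vm *vm;
 *
 *	if (vm_create("testvm", &vm) == 0)
 *		(void) vm_set_topology(vm, 2, 2, 1, vm_get_maxcpus(vm));
 */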

/*
 * APIs that modify the guest memory map require all vcpus to be frozen.
 */
void vm_slock_memsegs(struct vm *vm);
void vm_xlock_memsegs(struct vm *vm);
void vm_unlock_memsegs(struct vm *vm);
int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot, int flags);
int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
void vm_free_memseg(struct vm *vm, int ident);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
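
/*
 * Illustrative sketch (not compiled): allocating a 1 MB system-memory
 * segment and mapping it at guest physical address 0 while holding the
 * exclusive memsegs lock.  Segment identifier 0 and VM_PROT_ALL are
 * assumptions; error handling is elided.
 *
 *	vm_xlock_memsegs(vm);
 *	(void) vm_alloc_memseg(vm, 0, 1024 * 1024, true);
 *	(void) vm_mmap_memseg(vm, 0, 0, 0, 1024 * 1024, VM_PROT_ALL, 0);
 *	vm_unlock_memsegs(vm);
 */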

/*
 * APIs that inspect the guest memory map require only a *single* vcpu to
 * be frozen.  This acts like a read lock on the guest memory map since any
 * modification requires *all* vcpus to be frozen.
 */
int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
    int prot, void **cookie);
void vm_gpa_release(void *cookie);
bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
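
/*
 * Illustrative sketch (not compiled): wiring a guest physical range just
 * long enough to read a 4-byte value from it.
 *
 *	void *cookie, *hva;
 *	uint32_t val;
 *
 *	hva = vm_gpa_hold(vcpu, gpa, sizeof(val), VM_PROT_READ, &cookie);
 *	if (hva != NULL) {
 *		memcpy(&val, hva, sizeof(val));
 *		vm_gpa_release(cookie);
 *	}
 */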

int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vcpu *vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vcpu *vcpu, int reg,
    struct seg_desc *desc);
int vm_run(struct vcpu *vcpu);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vcpu *vcpu);
int vm_nmi_pending(struct vcpu *vcpu);
void vm_nmi_clear(struct vcpu *vcpu);
int vm_inject_extint(struct vcpu *vcpu);
int vm_extint_pending(struct vcpu *vcpu);
void vm_extint_clear(struct vcpu *vcpu);
int vcpu_vcpuid(struct vcpu *vcpu);
struct vm *vcpu_vm(struct vcpu *vcpu);
struct vcpu *vm_vcpu(struct vm *vm, int cpu);
struct vlapic *vm_lapic(struct vcpu *vcpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vcpu *vcpu, int type, int *val);
int vm_set_capability(struct vcpu *vcpu, int type, int val);
int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vcpu *vcpu);
int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
int vm_restart_instruction(struct vcpu *vcpu);
struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
cpuset_t *vm_exitinfo_cpuset(struct vcpu *vcpu);
void vm_exit_suspended(struct vcpu *vcpu, uint64_t rip);
void vm_exit_debug(struct vcpu *vcpu, uint64_t rip);
void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip);
void vm_exit_astpending(struct vcpu *vcpu, uint64_t rip);
void vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip);
int vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta);
int vm_restore_time(struct vm *vm);

#ifdef _SYS__CPUSET_H_
/*
 * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
 * The rendezvous 'func(arg)' is not allowed to do anything that will
 * cause the thread to be put to sleep.
 *
 * The caller cannot hold any locks when initiating the rendezvous.
 *
 * The implementation of this API may cause vcpus other than those specified
 * by 'dest' to be stalled.  The caller should not rely on any vcpus making
 * forward progress when the rendezvous is in progress.
 */
typedef void (*vm_rendezvous_func_t)(struct vcpu *vcpu, void *arg);
int vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);

cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_debug_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
cpuset_t vm_start_cpus(struct vm *vm, const cpuset_t *tostart);
void vm_await_start(struct vm *vm, const cpuset_t *waiting);
#endif	/* _SYS__CPUSET_H_ */
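
/*
 * Illustrative sketch (not compiled): rendezvousing all active vcpus.
 * 'flush_cb' is a hypothetical callback; it must not sleep, and the
 * caller must not hold any locks.
 *
 *	static void
 *	flush_cb(struct vcpu *vcpu, void *arg)
 *	{
 *		(nonsleeping per-vcpu work)
 *	}
 *
 *	cpuset_t dest = vm_active_cpus(vm);
 *	(void) vm_smp_rendezvous(vcpu, dest, flush_cb, NULL);
 */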

static __inline int
vcpu_rendezvous_pending(struct vcpu *vcpu, struct vm_eventinfo *info)
{
	/*
	 * This check isn't done with atomic operations or under a lock because
	 * there's no need to.  If the vcpuid bit is set, the vcpu is part of a
	 * rendezvous and the bit won't be cleared until the vcpu enters the
	 * rendezvous.  On rendezvous exit, the cpuset is cleared and the vcpu
	 * will see an empty cpuset.  So, the races are harmless.
	 */
	return (CPU_ISSET(vcpu_vcpuid(vcpu), info->rptr));
}

static __inline int
vcpu_suspended(struct vm_eventinfo *info)
{

	return (*info->sptr);
}

static __inline int
vcpu_reqidle(struct vm_eventinfo *info)
{

	return (*info->iptr);
}

int vcpu_debugged(struct vcpu *vcpu);

/*
 * Return true if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return false otherwise.
 */
bool vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);

static int __inline
vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
{
	return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vcpu *vcpu)
{
	struct thread *td;

	td = curthread;
	return (td->td_ast != 0 || td->td_owepreempt != 0);
}
#endif

void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu.  This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vcpu *vcpu, int vector, int err_valid,
    uint32_t errcode, int restart_instruction);

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT.  The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event.  For
 * example, if the task switch emulation is triggered via a task gate then
 * it should call this function with 'intinfo=0' to indicate that the
 * external event is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vcpu *vcpu, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest.  This function combines
 * nested events into a double or triple fault.
 *
 * Returns 0 if there are no events that need to be injected into the guest
 * and non-zero otherwise.
 */
int vm_entry_intinfo(struct vcpu *vcpu, uint64_t *info);

int vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2);
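
/*
 * Illustrative sketch (not compiled): the VM-entry injection flow.  If
 * vm_entry_intinfo() reports a pending event, the backend decodes the
 * intinfo word and programs the hardware to deliver it.
 *
 *	uint64_t intinfo;
 *
 *	if (vm_entry_intinfo(vcpu, &intinfo)) {
 *		int vector = VM_INTINFO_VECTOR(intinfo);
 *		(program event injection for 'vector')
 *	}
 */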

/*
 * Function used to keep track of the guest's TSC offset.  The
 * offset is used by the virtualization extensions to provide a consistent
 * value for the Time Stamp Counter to the guest.
 */
void vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset);

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy 'len' bytes to/from the guest linear address
 * space starting at 'gla'.  The 'prot' should be set to PROT_READ for a
 * copyin or PROT_WRITE for a copyout.
 *
 * retval	is_fault	Interpretation
 *   0		   0		Success
 *   0		   1		An exception was injected into the guest
 * EFAULT	  N/A		Unrecoverable error
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0.  The 'copyinfo[]' resources should be freed by
 * calling 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo);
void vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len);
void vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len);
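
/*
 * Illustrative sketch (not compiled): a typical copyin sequence.  'buf'
 * is a hypothetical kernel buffer; 'paging' describes the guest's
 * current paging state.
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vcpu, &paging, gla, len, PROT_READ,
 *	    copyinfo, nitems(copyinfo), &fault);
 *	if (error == 0 && !fault) {
 *		vm_copyin(copyinfo, buf, len);
 *		vm_copy_teardown(copyinfo, nitems(copyinfo));
 *	}
 */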

int vcpu_trace_exceptions(struct vcpu *vcpu);
int vcpu_trap_wbinvd(struct vcpu *vcpu);
#endif	/* _KERNEL */

/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,
	VM_CAP_MTRAP_EXIT,
	VM_CAP_PAUSE_EXIT,
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_ENABLE_INVPCID,
	VM_CAP_BPT_EXIT,
	VM_CAP_RDPID,
	VM_CAP_RDTSCP,
	VM_CAP_IPI_EXIT,
	VM_CAP_MASK_HWINTR,
	VM_CAP_RFLAGS_TF,
	VM_CAP_MAX
};

enum vm_intr_trigger {
	EDGE_TRIGGER,
	LEVEL_TRIGGER
};

/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;
	uint32_t	limit;
	uint32_t	access;
};
#define	SEG_DESC_TYPE(access)		((access) & 0x001f)
#define	SEG_DESC_DPL(access)		(((access) >> 5) & 0x3)
#define	SEG_DESC_PRESENT(access)	(((access) & 0x0080) ? 1 : 0)
#define	SEG_DESC_DEF32(access)		(((access) & 0x4000) ? 1 : 0)
#define	SEG_DESC_GRANULARITY(access)	(((access) & 0x8000) ? 1 : 0)
#define	SEG_DESC_UNUSABLE(access)	(((access) & 0x10000) ? 1 : 0)
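
/*
 * Illustrative sketch (not compiled): decoding the access word of a flat
 * 32-bit code segment (0xc09b): present, DPL 0, 32-bit default operand
 * size, page granularity.
 *
 *	struct seg_desc desc = { .base = 0, .limit = 0xfffff,
 *	    .access = 0xc09b };
 *
 *	SEG_DESC_DPL(desc.access);		(0)
 *	SEG_DESC_PRESENT(desc.access);		(1)
 *	SEG_DESC_DEF32(desc.access);		(1)
 *	SEG_DESC_GRANULARITY(desc.access);	(1)
 */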

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

enum vm_paging_mode {
	PAGING_MODE_FLAT,
	PAGING_MODE_32,
	PAGING_MODE_PAE,
	PAGING_MODE_64,
	PAGING_MODE_64_LA57,
};

struct vm_guest_paging {
	uint64_t	cr3;
	int		cpl;
	enum vm_cpu_mode cpu_mode;
	enum vm_paging_mode paging_mode;
};

/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding.  The only reason why their contents
 * need to be exposed is because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
	uint16_t	op_flags;
};
_Static_assert(sizeof(struct vie_op) == 4, "ABI");
_Static_assert(_Alignof(struct vie_op) == 2, "ABI");

#define	VIE_INST_SIZE	15
struct vie {
	uint8_t		inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t		num_valid;		/* size of the instruction */

/* The following fields are all zeroed upon restart. */
#define	vie_startzero	num_processed
	uint8_t		num_processed;

	uint8_t		addrsize:4, opsize:4;	/* address and operand sizes */
	uint8_t		rex_w:1,		/* REX prefix */
			rex_r:1,
			rex_x:1,
			rex_b:1,
			rex_present:1,
			repz_present:1,		/* REP/REPE/REPZ prefix */
			repnz_present:1,	/* REPNE/REPNZ prefix */
			opsize_override:1,	/* Operand size override */
			addrsize_override:1,	/* Address size override */
			segment_override:1;	/* Segment override */

	uint8_t		mod:2,			/* ModRM byte */
			reg:4,
			rm:4;

	uint8_t		ss:2,			/* SIB byte */
			vex_present:1,		/* VEX prefixed */
			vex_l:1,		/* L bit */
			index:4,		/* SIB byte */
			base:4;			/* SIB byte */

	uint8_t		disp_bytes;
	uint8_t		imm_bytes;

	uint8_t		scale;

	uint8_t		vex_reg:4,	/* vvvv: first source register specifier */
			vex_pp:2,	/* pp */
			_sparebits:2;

	uint8_t		_sparebytes[2];

	int		base_register;		/* VM_REG_GUEST_xyz */
	int		index_register;		/* VM_REG_GUEST_xyz */
	int		segment_register;	/* VM_REG_GUEST_xyz */

	int64_t		displacement;		/* optional addr displacement */
	int64_t		immediate;		/* optional immediate operand */

	uint8_t		decoded;	/* set to 1 if successfully decoded */

	uint8_t		_sparebyte;

	struct vie_op	op;			/* opcode description */
};
_Static_assert(sizeof(struct vie) == 64, "ABI");
_Static_assert(__offsetof(struct vie, disp_bytes) == 22, "ABI");
_Static_assert(__offsetof(struct vie, scale) == 24, "ABI");
_Static_assert(__offsetof(struct vie, base_register) == 28, "ABI");

enum vm_exitcode {
	VM_EXITCODE_INOUT,
	VM_EXITCODE_VMX,
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,
	VM_EXITCODE_WRMSR,
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_INST_EMUL,
	VM_EXITCODE_SPINUP_AP,
	VM_EXITCODE_DEPRECATED1,	/* used to be SPINDOWN_CPU */
	VM_EXITCODE_RENDEZVOUS,
	VM_EXITCODE_IOAPIC_EOI,
	VM_EXITCODE_SUSPENDED,
	VM_EXITCODE_INOUT_STR,
	VM_EXITCODE_TASK_SWITCH,
	VM_EXITCODE_MONITOR,
	VM_EXITCODE_MWAIT,
	VM_EXITCODE_SVM,
	VM_EXITCODE_REQIDLE,
	VM_EXITCODE_DEBUG,
	VM_EXITCODE_VMINSN,
	VM_EXITCODE_BPT,
	VM_EXITCODE_IPI,
	VM_EXITCODE_DB,
	VM_EXITCODE_MAX
};

struct vm_inout {
	uint16_t	bytes:3;	/* 1 or 2 or 4 */
	uint16_t	in:1;
	uint16_t	string:1;
	uint16_t	rep:1;
	uint16_t	port;
	uint32_t	eax;		/* valid for out */
};

struct vm_inout_str {
	struct vm_inout	inout;		/* must be the first element */
	struct vm_guest_paging paging;
	uint64_t	rflags;
	uint64_t	cr0;
	uint64_t	index;
	uint64_t	count;		/* rep=1 (%rcx), rep=0 (1) */
	int		addrsize;
	enum vm_reg_name seg_name;
	struct seg_desc seg_desc;
};

enum task_switch_reason {
	TSR_CALL,
	TSR_IRET,
	TSR_JMP,
	TSR_IDT_GATE,	/* task gate in IDT */
};

struct vm_task_switch {
	uint16_t	tsssel;		/* new TSS selector */
	int		ext;		/* task switch due to external event */
	uint32_t	errcode;
	int		errcode_valid;	/* push 'errcode' on the new stack */
	enum task_switch_reason reason;
	struct vm_guest_paging paging;
};

struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;
	union {
		struct vm_inout	inout;
		struct vm_inout_str inout_str;
		struct {
			uint64_t	gpa;
			int		fault_type;
		} paging;
		struct {
			uint64_t	gpa;
			uint64_t	gla;
			uint64_t	cs_base;
			int		cs_d;		/* CS.D */
			struct vm_guest_paging paging;
			struct vie	vie;
		} inst_emul;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		status;		/* vmx inst status */
			/*
			 * 'exit_reason' and 'exit_qualification' are valid
			 * only if 'status' is zero.
			 */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
			/*
			 * 'inst_error' and 'inst_type' are valid
			 * only if 'status' is non-zero.
			 */
			int		inst_type;
			int		inst_error;
		} vmx;
		/*
		 * SVM specific payload.
		 */
		struct {
			uint64_t	exitcode;
			uint64_t	exitinfo1;
			uint64_t	exitinfo2;
		} svm;
		struct {
			int		inst_length;
		} bpt;
		struct {
			int		trace_trap;
			int		pushf_intercept;
			int		tf_shadow_val;
			struct vm_guest_paging paging;
		} dbg;
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;
		} msr;
		struct {
			int		vcpu;
			uint64_t	rip;
		} spinup_ap;
		struct {
			uint64_t	rflags;
			uint64_t	intr_status;
		} hlt;
		struct {
			int		vector;
		} ioapic_eoi;
		struct {
			enum vm_suspend_how how;
		} suspended;
		struct {
			/*
			 * The destination vCPU mask is saved in vcpu->cpuset
			 * and is copied out to userspace separately to avoid
			 * ABI concerns.
			 */
			uint32_t mode;
			uint8_t vector;
		} ipi;
		struct vm_task_switch task_switch;
	} u;
};
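
/*
 * Illustrative sketch (not compiled): dispatching on the exit code after
 * vm_run() completes.  Assumes 'vme' points to a populated vm_exit;
 * 'handle_inout' and 'handle_suspend' are hypothetical helpers.
 *
 *	switch (vme->exitcode) {
 *	case VM_EXITCODE_INOUT:
 *		handle_inout(&vme->u.inout);
 *		break;
 *	case VM_EXITCODE_SUSPENDED:
 *		handle_suspend(vme->u.suspended.how);
 *		break;
 *	default:
 *		break;
 *	}
 */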

/* APIs to inject faults into the guest */
void vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
    int errcode);

static __inline void
vm_inject_ud(struct vcpu *vcpu)
{
	vm_inject_fault(vcpu, IDT_UD, 0, 0);
}

static __inline void
vm_inject_gp(struct vcpu *vcpu)
{
	vm_inject_fault(vcpu, IDT_GP, 1, 0);
}

static __inline void
vm_inject_ac(struct vcpu *vcpu, int errcode)
{
	vm_inject_fault(vcpu, IDT_AC, 1, errcode);
}

static __inline void
vm_inject_ss(struct vcpu *vcpu, int errcode)
{
	vm_inject_fault(vcpu, IDT_SS, 1, errcode);
}

void vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2);
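
/*
 * Illustrative sketch (not compiled): injecting a page fault when a guest
 * linear address fails translation during emulation.  PGEX_P and PGEX_W
 * are assumed to come from <machine/trap.h>; 'gla' is the faulting
 * address, which is loaded into the guest's %cr2.
 *
 *	vm_inject_pf(vcpu, PGEX_P | PGEX_W, gla);
 */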

#endif	/* _VMM_H_ */