/*	$OpenBSD: vmmvar.h,v 1.74 2021/09/13 22:16:27 dv Exp $	*/
/*
 * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * CPU capabilities for VMM operation
 */
#ifndef _MACHINE_VMMVAR_H_
#define _MACHINE_VMMVAR_H_

#define VMM_HV_SIGNATURE 	"OpenBSDVMM58"

#define VMM_MAX_MEM_RANGES	16
#define VMM_MAX_DISKS_PER_VM	4
#define VMM_MAX_PATH_DISK	128
#define VMM_MAX_PATH_CDROM	128
#define VMM_MAX_NAME_LEN	64
#define VMM_MAX_KERNEL_PATH	128
#define VMM_MAX_VCPUS		512
#define VMM_MAX_VCPUS_PER_VM	64
#define VMM_MAX_VM_MEM_SIZE	32768
#define VMM_MAX_NICS_PER_VM	4

#define VMM_PCI_MMIO_BAR_BASE	0xF0000000ULL
#define VMM_PCI_MMIO_BAR_END	0xFFFFFFFFULL
#define VMM_PCI_MMIO_BAR_SIZE	0x00010000
#define VMM_PCI_IO_BAR_BASE	0x1000
#define VMM_PCI_IO_BAR_END	0xFFFF
#define VMM_PCI_IO_BAR_SIZE	0x1000

/* VMX: Basic Exit Reasons */
#define VMX_EXIT_NMI				0
#define VMX_EXIT_EXTINT				1
#define VMX_EXIT_TRIPLE_FAULT			2
#define VMX_EXIT_INIT				3
#define VMX_EXIT_SIPI				4
#define VMX_EXIT_IO_SMI				5
#define VMX_EXIT_OTHER_SMI			6
#define VMX_EXIT_INT_WINDOW			7
#define VMX_EXIT_NMI_WINDOW			8
#define VMX_EXIT_TASK_SWITCH			9
#define VMX_EXIT_CPUID				10
#define VMX_EXIT_GETSEC				11
#define VMX_EXIT_HLT				12
#define VMX_EXIT_INVD				13
#define VMX_EXIT_INVLPG				14
#define VMX_EXIT_RDPMC				15
#define VMX_EXIT_RDTSC				16
#define VMX_EXIT_RSM				17
#define VMX_EXIT_VMCALL				18
#define VMX_EXIT_VMCLEAR			19
#define VMX_EXIT_VMLAUNCH			20
#define VMX_EXIT_VMPTRLD			21
#define VMX_EXIT_VMPTRST			22
#define VMX_EXIT_VMREAD				23
#define VMX_EXIT_VMRESUME			24
#define VMX_EXIT_VMWRITE			25
#define VMX_EXIT_VMXOFF				26
#define VMX_EXIT_VMXON				27
#define VMX_EXIT_CR_ACCESS			28
#define VMX_EXIT_MOV_DR				29
#define VMX_EXIT_IO				30
#define VMX_EXIT_RDMSR				31
#define VMX_EXIT_WRMSR				32
#define VMX_EXIT_ENTRY_FAILED_GUEST_STATE	33
#define VMX_EXIT_ENTRY_FAILED_MSR_LOAD		34
#define VMX_EXIT_MWAIT				36
#define VMX_EXIT_MTF				37
#define VMX_EXIT_MONITOR			39
#define VMX_EXIT_PAUSE				40
#define VMX_EXIT_ENTRY_FAILED_MCE		41
#define VMX_EXIT_TPR_BELOW_THRESHOLD		43
#define VMX_EXIT_APIC_ACCESS			44
#define VMX_EXIT_VIRTUALIZED_EOI		45
#define VMX_EXIT_GDTR_IDTR			46
#define VMX_EXIT_LDTR_TR			47
#define VMX_EXIT_EPT_VIOLATION			48
#define VMX_EXIT_EPT_MISCONFIGURATION		49
#define VMX_EXIT_INVEPT				50
#define VMX_EXIT_RDTSCP				51
#define VMX_EXIT_VMX_PREEMPTION_TIMER_EXPIRED	52
#define VMX_EXIT_INVVPID			53
#define VMX_EXIT_WBINVD				54
#define VMX_EXIT_XSETBV				55
#define VMX_EXIT_APIC_WRITE			56
#define VMX_EXIT_RDRAND				57
#define VMX_EXIT_INVPCID			58
#define VMX_EXIT_VMFUNC				59
#define VMX_EXIT_RDSEED				61
#define VMX_EXIT_XSAVES				63
#define VMX_EXIT_XRSTORS			64

/*
 * VMX: Misc defines
 */
#define VMX_MAX_CR3_TARGETS			256
#define VMX_VMCS_PA_CLEAR			0xFFFFFFFFFFFFFFFFUL

#define VM_EXIT_TERMINATED			0xFFFE
#define VM_EXIT_NONE				0xFFFF

/*
 * SVM: Intercept codes (exit reasons)
 */
#define SVM_VMEXIT_CR0_READ			0x00
#define SVM_VMEXIT_CR1_READ			0x01
#define SVM_VMEXIT_CR2_READ			0x02
#define SVM_VMEXIT_CR3_READ			0x03
#define SVM_VMEXIT_CR4_READ			0x04
#define SVM_VMEXIT_CR5_READ			0x05
#define SVM_VMEXIT_CR6_READ			0x06
#define SVM_VMEXIT_CR7_READ			0x07
#define SVM_VMEXIT_CR8_READ			0x08
#define SVM_VMEXIT_CR9_READ			0x09
#define SVM_VMEXIT_CR10_READ			0x0A
#define SVM_VMEXIT_CR11_READ			0x0B
#define SVM_VMEXIT_CR12_READ			0x0C
#define SVM_VMEXIT_CR13_READ			0x0D
#define SVM_VMEXIT_CR14_READ			0x0E
#define SVM_VMEXIT_CR15_READ			0x0F
#define SVM_VMEXIT_CR0_WRITE			0x10
#define SVM_VMEXIT_CR1_WRITE			0x11
#define SVM_VMEXIT_CR2_WRITE			0x12
#define SVM_VMEXIT_CR3_WRITE			0x13
#define SVM_VMEXIT_CR4_WRITE			0x14
#define SVM_VMEXIT_CR5_WRITE			0x15
#define SVM_VMEXIT_CR6_WRITE			0x16
#define SVM_VMEXIT_CR7_WRITE			0x17
#define SVM_VMEXIT_CR8_WRITE			0x18
#define SVM_VMEXIT_CR9_WRITE			0x19
#define SVM_VMEXIT_CR10_WRITE			0x1A
#define SVM_VMEXIT_CR11_WRITE			0x1B
#define SVM_VMEXIT_CR12_WRITE			0x1C
#define SVM_VMEXIT_CR13_WRITE			0x1D
#define SVM_VMEXIT_CR14_WRITE			0x1E
#define SVM_VMEXIT_CR15_WRITE			0x1F
#define SVM_VMEXIT_DR0_READ			0x20
#define SVM_VMEXIT_DR1_READ			0x21
#define SVM_VMEXIT_DR2_READ			0x22
#define SVM_VMEXIT_DR3_READ			0x23
#define SVM_VMEXIT_DR4_READ			0x24
#define SVM_VMEXIT_DR5_READ			0x25
#define SVM_VMEXIT_DR6_READ			0x26
#define SVM_VMEXIT_DR7_READ			0x27
#define SVM_VMEXIT_DR8_READ			0x28
#define SVM_VMEXIT_DR9_READ			0x29
#define SVM_VMEXIT_DR10_READ			0x2A
#define SVM_VMEXIT_DR11_READ			0x2B
#define SVM_VMEXIT_DR12_READ			0x2C
#define SVM_VMEXIT_DR13_READ			0x2D
#define SVM_VMEXIT_DR14_READ			0x2E
#define SVM_VMEXIT_DR15_READ			0x2F
#define SVM_VMEXIT_DR0_WRITE			0x30
#define SVM_VMEXIT_DR1_WRITE			0x31
#define SVM_VMEXIT_DR2_WRITE			0x32
#define SVM_VMEXIT_DR3_WRITE			0x33
#define SVM_VMEXIT_DR4_WRITE			0x34
#define SVM_VMEXIT_DR5_WRITE			0x35
#define SVM_VMEXIT_DR6_WRITE			0x36
#define SVM_VMEXIT_DR7_WRITE			0x37
#define SVM_VMEXIT_DR8_WRITE			0x38
#define SVM_VMEXIT_DR9_WRITE			0x39
#define SVM_VMEXIT_DR10_WRITE			0x3A
#define SVM_VMEXIT_DR11_WRITE			0x3B
#define SVM_VMEXIT_DR12_WRITE			0x3C
#define SVM_VMEXIT_DR13_WRITE			0x3D
#define SVM_VMEXIT_DR14_WRITE			0x3E
#define SVM_VMEXIT_DR15_WRITE			0x3F
#define SVM_VMEXIT_EXCP0			0x40
#define SVM_VMEXIT_EXCP1			0x41
#define SVM_VMEXIT_EXCP2			0x42
#define SVM_VMEXIT_EXCP3			0x43
#define SVM_VMEXIT_EXCP4			0x44
#define SVM_VMEXIT_EXCP5			0x45
#define SVM_VMEXIT_EXCP6			0x46
#define SVM_VMEXIT_EXCP7			0x47
#define SVM_VMEXIT_EXCP8			0x48
#define SVM_VMEXIT_EXCP9			0x49
#define SVM_VMEXIT_EXCP10			0x4A
#define SVM_VMEXIT_EXCP11			0x4B
#define SVM_VMEXIT_EXCP12			0x4C
#define SVM_VMEXIT_EXCP13			0x4D
#define SVM_VMEXIT_EXCP14			0x4E
#define SVM_VMEXIT_EXCP15			0x4F
#define SVM_VMEXIT_EXCP16			0x50
#define SVM_VMEXIT_EXCP17			0x51
#define SVM_VMEXIT_EXCP18			0x52
#define SVM_VMEXIT_EXCP19			0x53
#define SVM_VMEXIT_EXCP20			0x54
#define SVM_VMEXIT_EXCP21			0x55
#define SVM_VMEXIT_EXCP22			0x56
#define SVM_VMEXIT_EXCP23			0x57
#define SVM_VMEXIT_EXCP24			0x58
#define SVM_VMEXIT_EXCP25			0x59
#define SVM_VMEXIT_EXCP26			0x5A
#define SVM_VMEXIT_EXCP27			0x5B
#define SVM_VMEXIT_EXCP28			0x5C
#define SVM_VMEXIT_EXCP29			0x5D
#define SVM_VMEXIT_EXCP30			0x5E
#define SVM_VMEXIT_EXCP31			0x5F
#define SVM_VMEXIT_INTR				0x60
#define SVM_VMEXIT_NMI				0x61
#define SVM_VMEXIT_SMI				0x62
#define SVM_VMEXIT_INIT				0x63
#define SVM_VMEXIT_VINTR			0x64
#define SVM_VMEXIT_CR0_SEL_WRITE		0x65
#define SVM_VMEXIT_IDTR_READ			0x66
#define SVM_VMEXIT_GDTR_READ			0x67
#define SVM_VMEXIT_LDTR_READ			0x68
#define SVM_VMEXIT_TR_READ			0x69
#define SVM_VMEXIT_IDTR_WRITE			0x6A
#define SVM_VMEXIT_GDTR_WRITE			0x6B
#define SVM_VMEXIT_LDTR_WRITE			0x6C
#define SVM_VMEXIT_TR_WRITE			0x6D
#define SVM_VMEXIT_RDTSC			0x6E
#define SVM_VMEXIT_RDPMC			0x6F
#define SVM_VMEXIT_PUSHF			0x70
#define SVM_VMEXIT_POPF				0x71
#define SVM_VMEXIT_CPUID			0x72
#define SVM_VMEXIT_RSM				0x73
#define SVM_VMEXIT_IRET				0x74
#define SVM_VMEXIT_SWINT			0x75
#define SVM_VMEXIT_INVD				0x76
#define SVM_VMEXIT_PAUSE			0x77
#define SVM_VMEXIT_HLT				0x78
#define SVM_VMEXIT_INVLPG			0x79
#define SVM_VMEXIT_INVLPGA			0x7A
#define SVM_VMEXIT_IOIO				0x7B
#define SVM_VMEXIT_MSR				0x7C
#define SVM_VMEXIT_TASK_SWITCH			0x7D
#define SVM_VMEXIT_FERR_FREEZE			0x7E
#define SVM_VMEXIT_SHUTDOWN			0x7F
#define SVM_VMEXIT_VMRUN			0x80
#define SVM_VMEXIT_VMMCALL			0x81
#define SVM_VMEXIT_VMLOAD			0x82
#define SVM_VMEXIT_VMSAVE			0x83
#define SVM_VMEXIT_STGI				0x84
#define SVM_VMEXIT_CLGI				0x85
#define SVM_VMEXIT_SKINIT			0x86
#define SVM_VMEXIT_RDTSCP			0x87
#define SVM_VMEXIT_ICEBP			0x88
#define SVM_VMEXIT_WBINVD			0x89
#define SVM_VMEXIT_MONITOR			0x8A
#define SVM_VMEXIT_MWAIT			0x8B
#define SVM_VMEXIT_MWAIT_CONDITIONAL		0x8C
#define SVM_VMEXIT_XSETBV			0x8D
#define SVM_VMEXIT_EFER_WRITE_TRAP		0x8F
#define SVM_VMEXIT_CR0_WRITE_TRAP		0x90
#define SVM_VMEXIT_CR1_WRITE_TRAP		0x91
#define SVM_VMEXIT_CR2_WRITE_TRAP		0x92
#define SVM_VMEXIT_CR3_WRITE_TRAP		0x93
#define SVM_VMEXIT_CR4_WRITE_TRAP		0x94
#define SVM_VMEXIT_CR5_WRITE_TRAP		0x95
#define SVM_VMEXIT_CR6_WRITE_TRAP		0x96
#define SVM_VMEXIT_CR7_WRITE_TRAP		0x97
#define SVM_VMEXIT_CR8_WRITE_TRAP		0x98
#define SVM_VMEXIT_CR9_WRITE_TRAP		0x99
#define SVM_VMEXIT_CR10_WRITE_TRAP		0x9A
#define SVM_VMEXIT_CR11_WRITE_TRAP		0x9B
#define SVM_VMEXIT_CR12_WRITE_TRAP		0x9C
#define SVM_VMEXIT_CR13_WRITE_TRAP		0x9D
#define SVM_VMEXIT_CR14_WRITE_TRAP		0x9E
#define SVM_VMEXIT_CR15_WRITE_TRAP		0x9F
#define SVM_VMEXIT_NPF				0x400
#define SVM_AVIC_INCOMPLETE_IPI			0x401
#define SVM_AVIC_NOACCEL			0x402
#define SVM_VMEXIT_VMGEXIT			0x403
#define SVM_VMEXIT_INVALID			-1

/*
 * Exception injection vectors (these correspond to the CPU exception types
 * defined in the SDM.)
 */
#define VMM_EX_DE	0	/* Divide Error #DE */
#define VMM_EX_DB	1	/* Debug Exception #DB */
#define VMM_EX_NMI	2	/* NMI */
#define VMM_EX_BP	3	/* Breakpoint #BP */
#define VMM_EX_OF	4	/* Overflow #OF */
#define VMM_EX_BR	5	/* Bound range exceeded #BR */
#define VMM_EX_UD	6	/* Undefined opcode #UD */
#define VMM_EX_NM	7	/* Device not available #NM */
#define VMM_EX_DF	8	/* Double fault #DF */
#define VMM_EX_CP	9	/* Coprocessor segment overrun (unused) */
#define VMM_EX_TS	10	/* Invalid TSS #TS */
#define VMM_EX_NP	11	/* Segment not present #NP */
#define VMM_EX_SS	12	/* Stack segment fault #SS */
#define VMM_EX_GP	13	/* General protection #GP */
#define VMM_EX_PF	14	/* Page fault #PF */
#define VMM_EX_MF	16	/* x87 FPU floating point error #MF */
#define VMM_EX_AC	17	/* Alignment check #AC */
#define VMM_EX_MC	18	/* Machine check #MC */
#define VMM_EX_XM	19	/* SIMD floating point exception #XM */
#define VMM_EX_VE	20	/* Virtualization exception #VE */

/*
 * VCPU state values. Note that there is a conversion function in vmm.c
 * (vcpu_state_decode) that converts these to human readable strings,
 * so this enum and vcpu_state_decode should be kept in sync.
 */
enum {
	VCPU_STATE_STOPPED,
	VCPU_STATE_RUNNING,
	VCPU_STATE_REQTERM,
	VCPU_STATE_TERMINATED,
	VCPU_STATE_UNKNOWN,
};

enum {
	VEI_DIR_OUT,
	VEI_DIR_IN
};

enum {
	VEE_FAULT_PROTECT
};

enum {
	VMM_CPU_MODE_REAL,
	VMM_CPU_MODE_PROT,
	VMM_CPU_MODE_PROT32,
	VMM_CPU_MODE_COMPAT,
	VMM_CPU_MODE_LONG,
	VMM_CPU_MODE_UNKNOWN,
};

/*
 * Port definitions not found elsewhere
 */
#define PCKBC_AUX	0x61
#define ELCR0		0x4D0
#define ELCR1		0x4D1

/*
 * vm exit data
 *  vm_exit_inout		: describes an IN/OUT exit
 */
struct vm_exit_inout {
	uint8_t			vei_size;	/* Size of access */
	uint8_t			vei_dir;	/* Direction */
	uint8_t			vei_rep;	/* REP prefix? */
	uint8_t			vei_string;	/* string variety? */
	uint8_t			vei_encoding;	/* operand encoding */
	uint16_t		vei_port;	/* port */
	uint32_t		vei_data;	/* data */
};
/*
 *  vm_exit_eptviolation	: describes an EPT VIOLATION exit
 */
struct vm_exit_eptviolation {
	uint8_t		vee_fault_type;
};

/*
 * struct vcpu_segment_info
 *
 * Describes a segment + selector set, used in constructing the initial vcpu
 * register content
 */
struct vcpu_segment_info {
	uint16_t vsi_sel;
	uint32_t vsi_limit;
	uint32_t vsi_ar;
	uint64_t vsi_base;
};

#define VCPU_REGS_RAX		0
#define VCPU_REGS_RBX		1
#define VCPU_REGS_RCX		2
#define VCPU_REGS_RDX		3
#define VCPU_REGS_RSI		4
#define VCPU_REGS_RDI		5
#define VCPU_REGS_R8		6
#define VCPU_REGS_R9		7
#define VCPU_REGS_R10		8
#define VCPU_REGS_R11		9
#define VCPU_REGS_R12		10
#define VCPU_REGS_R13		11
#define VCPU_REGS_R14		12
#define VCPU_REGS_R15		13
#define VCPU_REGS_RSP		14
#define VCPU_REGS_RBP		15
#define VCPU_REGS_RIP		16
#define VCPU_REGS_RFLAGS	17
#define VCPU_REGS_NGPRS		(VCPU_REGS_RFLAGS + 1)

#define VCPU_REGS_CR0	0
#define VCPU_REGS_CR2	1
#define VCPU_REGS_CR3	2
#define VCPU_REGS_CR4	3
#define VCPU_REGS_CR8	4
#define VCPU_REGS_XCR0	5
#define VCPU_REGS_PDPTE0 6
#define VCPU_REGS_PDPTE1 7
#define VCPU_REGS_PDPTE2 8
#define VCPU_REGS_PDPTE3 9
#define VCPU_REGS_NCRS	(VCPU_REGS_PDPTE3 + 1)

#define VCPU_REGS_CS		0
#define VCPU_REGS_DS		1
#define VCPU_REGS_ES		2
#define VCPU_REGS_FS		3
#define VCPU_REGS_GS		4
#define VCPU_REGS_SS		5
#define VCPU_REGS_LDTR		6
#define VCPU_REGS_TR		7
#define VCPU_REGS_NSREGS	(VCPU_REGS_TR + 1)

#define VCPU_REGS_EFER   	0
#define VCPU_REGS_STAR   	1
#define VCPU_REGS_LSTAR  	2
#define VCPU_REGS_CSTAR  	3
#define VCPU_REGS_SFMASK 	4
#define VCPU_REGS_KGSBASE	5
#define VCPU_REGS_MISC_ENABLE	6
#define VCPU_REGS_NMSRS	(VCPU_REGS_MISC_ENABLE + 1)

#define VCPU_REGS_DR0		0
#define VCPU_REGS_DR1		1
#define VCPU_REGS_DR2		2
#define VCPU_REGS_DR3		3
#define VCPU_REGS_DR6		4
#define VCPU_REGS_DR7		5
#define VCPU_REGS_NDRS	(VCPU_REGS_DR7 + 1)

struct vcpu_reg_state {
	uint64_t			vrs_gprs[VCPU_REGS_NGPRS];
	uint64_t			vrs_crs[VCPU_REGS_NCRS];
	uint64_t			vrs_msrs[VCPU_REGS_NMSRS];
	uint64_t			vrs_drs[VCPU_REGS_NDRS];
	struct vcpu_segment_info	vrs_sregs[VCPU_REGS_NSREGS];
	struct vcpu_segment_info	vrs_gdtr;
	struct vcpu_segment_info	vrs_idtr;
};
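
/*
 * Example (illustrative sketch only, not part of this interface): the
 * arrays above are indexed with the VCPU_REGS_* constants, so seeding a
 * protected/long mode entry state in vmd(8)-style code might look like:
 *
 *	struct vcpu_reg_state vrs;
 *
 *	memset(&vrs, 0, sizeof(vrs));
 *	vrs.vrs_gprs[VCPU_REGS_RIP] = 0x1000000;	(hypothetical entry point)
 *	vrs.vrs_gprs[VCPU_REGS_RFLAGS] = 0x2;		(reserved bit 1 always set)
 *	vrs.vrs_crs[VCPU_REGS_CR0] = CR0_PE | CR0_PG;
 *	vrs.vrs_msrs[VCPU_REGS_EFER] = EFER_LME | EFER_LMA;
 *
 * The authoritative reset state is built by vmd(8); this only shows how
 * the indices map onto vcpu_reg_state.
 */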

struct vm_mem_range {
	paddr_t	vmr_gpa;
	vaddr_t vmr_va;
	size_t	vmr_size;
};

/*
 * struct vm_exit
 *
 * Contains VM exit information communicated to vmd(8). This information is
 * gathered by vmm(4) from the CPU on each exit that requires help from vmd.
 */
struct vm_exit {
	union {
		struct vm_exit_inout		vei;	/* IN/OUT exit */
		struct vm_exit_eptviolation	vee;	/* EPT VIOLATION exit */
	};

	struct vcpu_reg_state		vrs;
	int				cpl;
};

struct vm_create_params {
	/* Input parameters to VMM_IOC_CREATE */
	size_t			vcp_nmemranges;
	size_t			vcp_ncpus;
	size_t			vcp_ndisks;
	size_t			vcp_nnics;
	struct vm_mem_range	vcp_memranges[VMM_MAX_MEM_RANGES];
	char			vcp_disks[VMM_MAX_DISKS_PER_VM][VMM_MAX_PATH_DISK];
	char			vcp_cdrom[VMM_MAX_PATH_CDROM];
	char			vcp_name[VMM_MAX_NAME_LEN];
	char			vcp_kernel[VMM_MAX_KERNEL_PATH];
	uint8_t			vcp_macs[VMM_MAX_NICS_PER_VM][6];

	/* Output parameter from VMM_IOC_CREATE */
	uint32_t	vcp_id;
};

struct vm_run_params {
	/* Input parameters to VMM_IOC_RUN */
	uint32_t	vrp_vm_id;
	uint32_t	vrp_vcpu_id;
	uint8_t		vrp_continue;		/* Continuing from an exit */
	uint16_t	vrp_irq;		/* IRQ to inject */

	/* Input/output parameter to VMM_IOC_RUN */
	struct vm_exit	*vrp_exit;		/* updated exit data */

	/* Output parameter from VMM_IOC_RUN */
	uint16_t	vrp_exit_reason;	/* exit reason */
	uint8_t		vrp_irqready;		/* ready for IRQ on entry */
};

struct vm_info_result {
	/* Output parameters from VMM_IOC_INFO */
	size_t		vir_memory_size;
	size_t		vir_used_size;
	size_t		vir_ncpus;
	uint8_t		vir_vcpu_state[VMM_MAX_VCPUS_PER_VM];
	pid_t		vir_creator_pid;
	uint32_t	vir_id;
	char		vir_name[VMM_MAX_NAME_LEN];
};

struct vm_info_params {
	/* Input parameters to VMM_IOC_INFO */
	size_t			vip_size;	/* Output buffer size */

	/* Output parameters from VMM_IOC_INFO */
	size_t			 vip_info_ct;	/* # of entries returned */
	struct vm_info_result	*vip_info;	/* Output buffer */
};

struct vm_terminate_params {
	/* Input parameters to VMM_IOC_TERM */
	uint32_t		vtp_vm_id;
};

struct vm_resetcpu_params {
	/* Input parameters to VMM_IOC_RESETCPU */
	uint32_t		vrp_vm_id;
	uint32_t		vrp_vcpu_id;
	struct vcpu_reg_state	vrp_init_state;
};

struct vm_intr_params {
	/* Input parameters to VMM_IOC_INTR */
	uint32_t		vip_vm_id;
	uint32_t		vip_vcpu_id;
	uint16_t		vip_intr;
};

#define VM_RWVMPARAMS_PVCLOCK_SYSTEM_GPA 0x1	/* read/write pvclock gpa */
#define VM_RWVMPARAMS_PVCLOCK_VERSION	 0x2	/* read/write pvclock version */
#define VM_RWVMPARAMS_ALL	(VM_RWVMPARAMS_PVCLOCK_SYSTEM_GPA | \
    VM_RWVMPARAMS_PVCLOCK_VERSION)

struct vm_rwvmparams_params {
	/* Input parameters to VMM_IOC_READVMPARAMS/VMM_IOC_WRITEVMPARAMS */
	uint32_t		vpp_vm_id;
	uint32_t		vpp_vcpu_id;
	uint32_t		vpp_mask;
	paddr_t			vpp_pvclock_system_gpa;
	uint32_t		vpp_pvclock_version;
};

#define VM_RWREGS_GPRS	0x1	/* read/write GPRs */
#define VM_RWREGS_SREGS	0x2	/* read/write segment registers */
#define VM_RWREGS_CRS	0x4	/* read/write CRs */
#define VM_RWREGS_MSRS	0x8	/* read/write MSRs */
#define VM_RWREGS_DRS	0x10	/* read/write DRs */
#define VM_RWREGS_ALL	(VM_RWREGS_GPRS | VM_RWREGS_SREGS | VM_RWREGS_CRS | \
    VM_RWREGS_MSRS | VM_RWREGS_DRS)

struct vm_rwregs_params {
	/*
	 * Input/output parameters to VMM_IOC_READREGS /
	 * VMM_IOC_WRITEREGS
	 */
	uint32_t		vrwp_vm_id;
	uint32_t		vrwp_vcpu_id;
	uint64_t		vrwp_mask;
	struct vcpu_reg_state	vrwp_regs;
};

struct vm_mprotect_ept_params {
	/* Input parameters to VMM_IOC_MPROTECT_EPT */
	uint32_t		vmep_vm_id;
	uint32_t		vmep_vcpu_id;
	vaddr_t			vmep_sgpa;
	size_t			vmep_size;
	int			vmep_prot;
};

/* IOCTL definitions */
#define VMM_IOC_CREATE _IOWR('V', 1, struct vm_create_params) /* Create VM */
#define VMM_IOC_RUN _IOWR('V', 2, struct vm_run_params) /* Run VCPU */
#define VMM_IOC_INFO _IOWR('V', 3, struct vm_info_params) /* Get VM Info */
#define VMM_IOC_TERM _IOW('V', 4, struct vm_terminate_params) /* Terminate VM */
#define VMM_IOC_RESETCPU _IOW('V', 5, struct vm_resetcpu_params) /* Reset */
#define VMM_IOC_INTR _IOW('V', 6, struct vm_intr_params) /* Intr pending */
#define VMM_IOC_READREGS _IOWR('V', 7, struct vm_rwregs_params) /* Get regs */
#define VMM_IOC_WRITEREGS _IOW('V', 8, struct vm_rwregs_params) /* Set regs */
/* Get VM params */
#define VMM_IOC_READVMPARAMS _IOWR('V', 9, struct vm_rwvmparams_params)
/* Set VM params */
#define VMM_IOC_WRITEVMPARAMS _IOW('V', 10, struct vm_rwvmparams_params)
/* Control the protection of EPT pages */
#define VMM_IOC_MPROTECT_EPT _IOW('V', 11, struct vm_mprotect_ept_params)
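
/*
 * Example (illustrative only, error handling omitted): userland drives
 * these ioctls on /dev/vmm, roughly the way vmd(8) does:
 *
 *	int fd = open("/dev/vmm", O_RDWR);
 *	struct vm_create_params vcp = { ... };
 *	struct vm_run_params vrp;
 *	struct vm_exit exit;
 *
 *	ioctl(fd, VMM_IOC_CREATE, &vcp);	(vcp.vcp_id returns the VM id)
 *
 *	memset(&vrp, 0, sizeof(vrp));
 *	vrp.vrp_vm_id = vcp.vcp_id;
 *	vrp.vrp_vcpu_id = 0;
 *	vrp.vrp_exit = &exit;
 *	for (;;) {
 *		ioctl(fd, VMM_IOC_RUN, &vrp);	(returns on exits needing vmd)
 *		... service vrp.vrp_exit_reason, e.g. exit.vei for IN/OUT ...
 *		vrp.vrp_continue = 1;		(resume from the handled exit)
 *	}
 *
 * This is a sketch of the calling convention only; see vmd(8) and vmm(4)
 * for the real protocol.
 */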

/* CPUID masks */
/*
 * clone host capabilities minus:
 *  debug store (CPUIDECX_DTES64, CPUIDECX_DSCPL, CPUID_DS)
 *  monitor/mwait (CPUIDECX_MWAIT, CPUIDECX_MWAITX)
 *  vmx/svm (CPUIDECX_VMX, CPUIDECX_SVM)
 *  smx (CPUIDECX_SMX)
 *  speedstep (CPUIDECX_EST)
 *  thermal (CPUIDECX_TM2, CPUID_ACPI, CPUID_TM)
 *  context id (CPUIDECX_CNXTID)
 *  machine check (CPUID_MCE, CPUID_MCA)
 *  silicon debug (CPUIDECX_SDBG)
 *  xTPR (CPUIDECX_XTPR)
 *  perf/debug (CPUIDECX_PDCM)
 *  pcid (CPUIDECX_PCID)
 *  direct cache access (CPUIDECX_DCA)
 *  x2APIC (CPUIDECX_X2APIC)
 *  apic deadline (CPUIDECX_DEADLINE)
 *  apic (CPUID_APIC)
 *  psn (CPUID_PSN)
 *  self snoop (CPUID_SS)
 *  hyperthreading (CPUID_HTT)
 *  pending break enabled (CPUID_PBE)
 *  MTRR (CPUID_MTRR)
 *  Speculative execution control features (AMD)
 */
#define VMM_CPUIDECX_MASK ~(CPUIDECX_EST | CPUIDECX_TM2 | CPUIDECX_MWAIT | \
    CPUIDECX_PDCM | CPUIDECX_VMX | CPUIDECX_DTES64 | \
    CPUIDECX_DSCPL | CPUIDECX_SMX | CPUIDECX_CNXTID | \
    CPUIDECX_SDBG | CPUIDECX_XTPR | CPUIDECX_PCID | \
    CPUIDECX_DCA | CPUIDECX_X2APIC | CPUIDECX_DEADLINE)
#define VMM_ECPUIDECX_MASK ~(CPUIDECX_SVM | CPUIDECX_MWAITX)
#define VMM_CPUIDEDX_MASK ~(CPUID_ACPI | CPUID_TM | \
    CPUID_HTT | CPUID_DS | CPUID_APIC | \
    CPUID_PSN | CPUID_SS | CPUID_PBE | \
    CPUID_MTRR | CPUID_MCE | CPUID_MCA)
#define VMM_AMDSPEC_EBX_MASK ~(CPUIDEBX_IBPB | CPUIDEBX_IBRS | \
    CPUIDEBX_STIBP | CPUIDEBX_IBRS_ALWAYSON | CPUIDEBX_STIBP_ALWAYSON | \
    CPUIDEBX_IBRS_PREF | CPUIDEBX_SSBD | CPUIDEBX_VIRT_SSBD | \
    CPUIDEBX_SSBD_NOTREQ)

/*
 * SEFF flags - copy from host minus:
 *  TSC_ADJUST (SEFF0EBX_TSC_ADJUST)
 *  SGX (SEFF0EBX_SGX)
 *  HLE (SEFF0EBX_HLE)
 *  INVPCID (SEFF0EBX_INVPCID)
 *  RTM (SEFF0EBX_RTM)
 *  PQM (SEFF0EBX_PQM)
 *  AVX512F (SEFF0EBX_AVX512F)
 *  AVX512DQ (SEFF0EBX_AVX512DQ)
 *  AVX512IFMA (SEFF0EBX_AVX512IFMA)
 *  AVX512PF (SEFF0EBX_AVX512PF)
 *  AVX512ER (SEFF0EBX_AVX512ER)
 *  AVX512CD (SEFF0EBX_AVX512CD)
 *  AVX512BW (SEFF0EBX_AVX512BW)
 *  AVX512VL (SEFF0EBX_AVX512VL)
 *  MPX (SEFF0EBX_MPX)
 *  PCOMMIT (SEFF0EBX_PCOMMIT)
 *  PT (SEFF0EBX_PT)
 *  AVX512VBMI (SEFF0ECX_AVX512VBMI)
 */
#define VMM_SEFF0EBX_MASK ~(SEFF0EBX_TSC_ADJUST | SEFF0EBX_SGX | \
    SEFF0EBX_HLE | SEFF0EBX_INVPCID | \
    SEFF0EBX_RTM | SEFF0EBX_PQM | SEFF0EBX_MPX | \
    SEFF0EBX_PCOMMIT | SEFF0EBX_PT | \
    SEFF0EBX_AVX512F | SEFF0EBX_AVX512DQ | \
    SEFF0EBX_AVX512IFMA | SEFF0EBX_AVX512PF | \
    SEFF0EBX_AVX512ER | SEFF0EBX_AVX512CD | \
    SEFF0EBX_AVX512BW | SEFF0EBX_AVX512VL)
#define VMM_SEFF0ECX_MASK ~(SEFF0ECX_AVX512VBMI)

/* EDX mask contains the bits to include */
#define VMM_SEFF0EDX_MASK (SEFF0EDX_MD_CLEAR)

/*
 * Extended function flags - copy from host minus:
 * 0x80000001  EDX:RDTSCP Support
 */
#define VMM_FEAT_EFLAGS_MASK ~(CPUID_RDTSCP)

/*
 * CPUID[0x4] deterministic cache info
 */
#define VMM_CPUID4_CACHE_TOPOLOGY_MASK	0x3FF

#ifdef _KERNEL

#define VMX_FAIL_LAUNCH_UNKNOWN 1
#define VMX_FAIL_LAUNCH_INVALID_VMCS 2
#define VMX_FAIL_LAUNCH_VALID_VMCS 3

#define VMX_NUM_MSR_STORE 7

/* MSR bitmap manipulation macros */
#define VMX_MSRIDX(m) ((m) / 8)
#define VMX_MSRBIT(m) (1 << (m) % 8)

#define SVM_MSRIDX(m) ((m) / 4)
#define SVM_MSRBIT_R(m) (1 << (((m) % 4) * 2))
#define SVM_MSRBIT_W(m) (1 << (((m) % 4) * 2 + 1))
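
/*
 * Example (illustrative only): clearing a bit in the per-vcpu MSR bitmap
 * lets the guest touch that MSR without a VM exit.  VMX uses one bit per
 * MSR (8 MSRs per byte); SVM uses a read bit and a write bit per MSR
 * (4 MSRs per byte), hence the separate index/bit macros:
 *
 *	uint8_t *b = (uint8_t *)vcpu->vc_msr_bitmap_va;
 *
 *	b[VMX_MSRIDX(msr)] &= ~(VMX_MSRBIT(msr));	(VMX: pass MSR through)
 *
 *	b[SVM_MSRIDX(msr)] &= ~(SVM_MSRBIT_R(msr));	(SVM: allow reads)
 *	b[SVM_MSRIDX(msr)] &= ~(SVM_MSRBIT_W(msr));	(SVM: allow writes)
 *
 * The real bitmaps are split into MSR ranges per the SDM/APM; vmm.c applies
 * the appropriate range offsets before using these macros, so the above only
 * shows the bit arithmetic.
 */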

enum {
	VMM_MODE_UNKNOWN,
	VMM_MODE_VMX,
	VMM_MODE_EPT,
	VMM_MODE_SVM,
	VMM_MODE_RVI
};

enum {
	VMM_MEM_TYPE_REGULAR,
	VMM_MEM_TYPE_UNKNOWN
};

/* Forward declarations */
struct vm;

/*
 * Implementation-specific cpu state
 */

struct vmcb_segment {
	uint16_t 			vs_sel;			/* 000h */
	uint16_t 			vs_attr;		/* 002h */
	uint32_t			vs_lim;			/* 004h */
	uint64_t			vs_base;		/* 008h */
};

struct vmcb {
	union {
		struct {
			uint32_t	v_cr_rw;		/* 000h */
			uint32_t	v_dr_rw;		/* 004h */
			uint32_t	v_excp;			/* 008h */
			uint32_t	v_intercept1;		/* 00Ch */
			uint32_t	v_intercept2;		/* 010h */
			uint8_t		v_pad1[0x28];		/* 014h-03Bh */
			uint16_t	v_pause_thr;		/* 03Ch */
			uint16_t	v_pause_ct;		/* 03Eh */
			uint64_t	v_iopm_pa;		/* 040h */
			uint64_t	v_msrpm_pa;		/* 048h */
			uint64_t	v_tsc_offset;		/* 050h */
			uint32_t	v_asid;			/* 058h */
			uint8_t		v_tlb_control;		/* 05Ch */
			uint8_t		v_pad2[0x3];		/* 05Dh-05Fh */
			uint8_t		v_tpr;			/* 060h */
			uint8_t		v_irq;			/* 061h */
			uint8_t		v_intr_misc;		/* 062h */
			uint8_t		v_intr_masking;		/* 063h */
			uint8_t		v_intr_vector;		/* 064h */
			uint8_t		v_pad3[0x3];		/* 065h-067h */
			uint64_t	v_intr_shadow;		/* 068h */
			uint64_t	v_exitcode;		/* 070h */
			uint64_t	v_exitinfo1;		/* 078h */
			uint64_t	v_exitinfo2;		/* 080h */
			uint64_t	v_exitintinfo;		/* 088h */
			uint64_t	v_np_enable;		/* 090h */
			uint64_t	v_avic_apic_bar;	/* 098h */
			uint64_t	v_pad4;			/* 0A0h */
			uint64_t	v_eventinj;		/* 0A8h */
			uint64_t	v_n_cr3;		/* 0B0h */
			uint64_t	v_lbr_virt_enable;	/* 0B8h */
			uint64_t	v_vmcb_clean_bits;	/* 0C0h */
			uint64_t	v_nrip;			/* 0C8h */
			uint8_t		v_n_bytes_fetched;	/* 0D0h */
			uint8_t		v_guest_ins_bytes[0xf];	/* 0D1h-0DFh */
			uint64_t	v_avic_apic_back_page;	/* 0E0h */
			uint64_t	v_pad5;			/* 0E8h-0EFh */
			uint64_t	v_avic_logical_table;	/* 0F0h */
			uint64_t	v_avic_phys;		/* 0F8h */

		};
		uint8_t vmcb_control[0x400];
	};

	union {
		struct {
			/* Offsets here are relative to start of VMCB SSA */
			struct vmcb_segment	v_es;		/* 000h */
			struct vmcb_segment	v_cs;		/* 010h */
			struct vmcb_segment	v_ss;		/* 020h */
			struct vmcb_segment	v_ds;		/* 030h */
			struct vmcb_segment	v_fs;		/* 040h */
			struct vmcb_segment	v_gs;		/* 050h */
			struct vmcb_segment	v_gdtr;		/* 060h */
			struct vmcb_segment	v_ldtr;		/* 070h */
			struct vmcb_segment	v_idtr;		/* 080h */
			struct vmcb_segment	v_tr;		/* 090h */
			uint8_t 		v_pad6[0x2B];	/* 0A0h-0CAh */
			uint8_t			v_cpl;		/* 0CBh */
			uint32_t		v_pad7;		/* 0CCh-0CFh */
			uint64_t		v_efer;		/* 0D0h */
			uint8_t			v_pad8[0x70];	/* 0D8h-147h */
			uint64_t		v_cr4;		/* 148h */
			uint64_t		v_cr3;		/* 150h */
			uint64_t		v_cr0;		/* 158h */
			uint64_t		v_dr7;		/* 160h */
			uint64_t		v_dr6;		/* 168h */
			uint64_t		v_rflags;	/* 170h */
			uint64_t		v_rip;		/* 178h */
			uint64_t		v_pad9[0xB];	/* 180h-1D7h */
			uint64_t		v_rsp;		/* 1D8h */
			uint64_t		v_pad10[0x3];	/* 1E0h-1F7h */
			uint64_t		v_rax;		/* 1F8h */
			uint64_t		v_star;		/* 200h */
			uint64_t		v_lstar;	/* 208h */
			uint64_t		v_cstar;	/* 210h */
			uint64_t		v_sfmask;	/* 218h */
			uint64_t		v_kgsbase;	/* 220h */
			uint64_t		v_sysenter_cs;	/* 228h */
			uint64_t		v_sysenter_esp;	/* 230h */
			uint64_t		v_sysenter_eip;	/* 238h */
			uint64_t		v_cr2;		/* 240h */
			uint64_t		v_pad11[0x4];	/* 248h-267h */
			uint64_t		v_g_pat;	/* 268h */
			uint64_t		v_dbgctl;	/* 270h */
			uint64_t		v_br_from;	/* 278h */
			uint64_t		v_br_to;	/* 280h */
			uint64_t		v_lastexcpfrom;	/* 288h */
			uint64_t		v_lastexcpto;	/* 290h */
		};

		uint8_t vmcb_layout[PAGE_SIZE - 0x400];
	};
};

struct vmcs {
	uint32_t	vmcs_revision;
};

struct vmx_invvpid_descriptor
{
	uint64_t	vid_vpid;
	uint64_t	vid_addr;
};

struct vmx_invept_descriptor
{
	uint64_t	vid_eptp;
	uint64_t	vid_reserved;
};

struct vmx_msr_store
{
	uint64_t	vms_index;
	uint64_t	vms_data;
};

/*
 * Storage for guest registers not preserved in VMCS and various exit
 * information.
 *
 * Note that vmx/svm_enter_guest depend on the layout of this struct for
 * field access.
 */
struct vcpu_gueststate
{
	/* %rsi should be first */
	uint64_t	vg_rsi;			/* 0x00 */
	uint64_t	vg_rax;			/* 0x08 */
	uint64_t	vg_rbx;			/* 0x10 */
	uint64_t	vg_rcx;			/* 0x18 */
	uint64_t	vg_rdx;			/* 0x20 */
	uint64_t	vg_rdi;			/* 0x28 */
	uint64_t	vg_rbp;			/* 0x30 */
	uint64_t	vg_r8;			/* 0x38 */
	uint64_t	vg_r9;			/* 0x40 */
	uint64_t	vg_r10;			/* 0x48 */
	uint64_t	vg_r11;			/* 0x50 */
	uint64_t	vg_r12;			/* 0x58 */
	uint64_t	vg_r13;			/* 0x60 */
	uint64_t	vg_r14;			/* 0x68 */
	uint64_t	vg_r15;			/* 0x70 */
	uint64_t	vg_cr2;			/* 0x78 */
	uint64_t	vg_rip;			/* 0x80 */
	uint32_t	vg_exit_reason;		/* 0x88 */
	uint64_t	vg_rflags;		/* 0x90 */
	uint64_t	vg_xcr0;		/* 0x98 */
	/*
	 * Debug registers
	 * - %dr4/%dr5 are aliased to %dr6/%dr7 (or cause #DE)
	 * - %dr7 is saved automatically in the VMCS
	 */
	uint64_t	vg_dr0;			/* 0xa0 */
	uint64_t	vg_dr1;			/* 0xa8 */
	uint64_t	vg_dr2;			/* 0xb0 */
	uint64_t	vg_dr3;			/* 0xb8 */
	uint64_t	vg_dr6;			/* 0xc0 */
};
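
/*
 * The assembly in vmx/svm_enter_guest hard-codes the field offsets noted
 * above.  One way to make that layout dependency explicit would be compile
 * time checks against those offsets, e.g. (a sketch only, not something
 * this header currently does):
 *
 *	CTASSERT(offsetof(struct vcpu_gueststate, vg_rsi) == 0x00);
 *	CTASSERT(offsetof(struct vcpu_gueststate, vg_rip) == 0x80);
 *	CTASSERT(offsetof(struct vcpu_gueststate, vg_dr6) == 0xc0);
 */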

/*
 * Virtual Machine
 */
struct vm;

/*
 * Virtual CPU
 */
struct vcpu {
	/*
	 * Guest FPU state - this must remain as the first member of the struct
	 * to ensure 64-byte alignment (set up during vcpu_pool init)
	 */
	struct savefpu vc_g_fpu;

	/* VMCS / VMCB pointer */
	vaddr_t vc_control_va;
	paddr_t vc_control_pa;

	/* VLAPIC pointer */
	vaddr_t vc_vlapic_va;
	uint64_t vc_vlapic_pa;

	/* MSR bitmap address */
	vaddr_t vc_msr_bitmap_va;
	uint64_t vc_msr_bitmap_pa;

	struct vm *vc_parent;
	uint32_t vc_id;
	uint16_t vc_vpid;
	u_int vc_state;
	SLIST_ENTRY(vcpu) vc_vcpu_link;

	uint8_t vc_virt_mode;

	struct rwlock vc_lock;
	struct cpu_info *vc_last_pcpu;
	struct vm_exit vc_exit;

	uint16_t vc_intr;
	uint8_t vc_irqready;

	uint8_t vc_fpuinited;

	uint64_t vc_h_xcr0;

	struct vcpu_gueststate vc_gueststate;

	uint8_t vc_event;

	uint32_t vc_pvclock_version;
	paddr_t vc_pvclock_system_gpa;
	uint32_t vc_pvclock_system_tsc_mul;

	/* Shadowed MSRs */
	uint64_t vc_shadow_pat;

	/* VMX only */
	uint64_t vc_vmx_basic;
	uint64_t vc_vmx_entry_ctls;
	uint64_t vc_vmx_true_entry_ctls;
	uint64_t vc_vmx_exit_ctls;
	uint64_t vc_vmx_true_exit_ctls;
	uint64_t vc_vmx_pinbased_ctls;
	uint64_t vc_vmx_true_pinbased_ctls;
	uint64_t vc_vmx_procbased_ctls;
	uint64_t vc_vmx_true_procbased_ctls;
	uint64_t vc_vmx_procbased2_ctls;
	vaddr_t vc_vmx_msr_exit_save_va;
	paddr_t vc_vmx_msr_exit_save_pa;
	vaddr_t vc_vmx_msr_exit_load_va;
	paddr_t vc_vmx_msr_exit_load_pa;
	vaddr_t vc_vmx_msr_entry_load_va;
	paddr_t vc_vmx_msr_entry_load_pa;
	uint8_t vc_vmx_vpid_enabled;
	uint64_t vc_vmx_cr0_fixed1;
	uint64_t vc_vmx_cr0_fixed0;
	uint32_t vc_vmx_vmcs_state;
#define VMCS_CLEARED	0
#define VMCS_LAUNCHED	1

	/* SVM only */
	vaddr_t vc_svm_hsa_va;
	paddr_t vc_svm_hsa_pa;
	vaddr_t vc_svm_ioio_va;
	paddr_t vc_svm_ioio_pa;
};

SLIST_HEAD(vcpu_head, vcpu);

void	vmm_dispatch_intr(vaddr_t);
int	vmxon(uint64_t *);
int	vmxoff(void);
int	vmclear(paddr_t *);
int	vmptrld(paddr_t *);
int	vmptrst(paddr_t *);
int	vmwrite(uint64_t, uint64_t);
int	vmread(uint64_t, uint64_t *);
void	invvpid(uint64_t, struct vmx_invvpid_descriptor *);
void	invept(uint64_t, struct vmx_invept_descriptor *);
int	vmx_enter_guest(paddr_t *, struct vcpu_gueststate *, int, uint8_t);
int	svm_enter_guest(uint64_t, struct vcpu_gueststate *,
    struct region_descriptor *);
void	start_vmm_on_cpu(struct cpu_info *);
void	stop_vmm_on_cpu(struct cpu_info *);
void	vmclear_on_cpu(struct cpu_info *);

#endif /* _KERNEL */

#endif /* ! _MACHINE_VMMVAR_H_ */