/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_VMX_CPUFUNC_H_
#define	_VMX_CPUFUNC_H_

struct vmcs;

/*
 * Section 5.2 "Conventions" from Intel Architecture Manual 2B.
 *
 *			error
 * VMsucceed		  0
 * VMFailInvalid	  1
 * VMFailValid		  2	see also VMCS VM-Instruction Error Field
 */
#define	VM_SUCCESS		0
#define	VM_FAIL_INVALID		1
#define	VM_FAIL_VALID		2
#define	VMX_SET_ERROR_CODE \
	"	jnc 1f;"						\
	"	mov $1, %[error];"	/* CF: error = 1 */		\
	"	jmp 3f;"						\
	"1:	jnz 2f;"						\
	"	mov $2, %[error];"	/* ZF: error = 2 */		\
	"	jmp 3f;"						\
	"2:	mov $0, %[error];"					\
	"3:"
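
/*
 * Every VMX instruction reports its status through RFLAGS: CF set means
 * VMfailInvalid, ZF set means VMfailValid (with details in the VMCS
 * VM-instruction error field), and both clear means success.  The
 * VMX_SET_ERROR_CODE sequence above maps that convention onto the error
 * codes defined here.
 */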

/* returns 0 on success and non-zero on failure */
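/*
 * Note: the VMXON region handed to vmxon() must be page-aligned and must
 * have been initialized with the VMCS revision identifier, as the VMXON
 * instruction requires.
 */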
static __inline int
vmxon(char *region)
{
	int error;
	uint64_t addr;

	addr = vtophys(region);
	__asm __volatile("vmxon %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");

	return (error);
}

/* returns 0 on success and non-zero on failure */
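/*
 * vmclear flushes any cached data for the VMCS to memory and leaves the
 * VMCS inactive and not current on this CPU.
 */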
static __inline int
vmclear(struct vmcs *vmcs)
{
	int error;
	uint64_t addr;

	addr = vtophys(vmcs);
	__asm __volatile("vmclear %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");
	return (error);
}

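/* Leave VMX operation; must be executed in VMX root operation. */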
static __inline void
vmxoff(void)
{

	__asm __volatile("vmxoff");
}

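/* Store the physical address of the current VMCS into *addr. */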
static __inline void
vmptrst(uint64_t *addr)
{

	__asm __volatile("vmptrst %[addr]" :: [addr]"m" (*addr) : "memory");
}

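/*
 * Make the given VMCS current and active on this CPU; returns 0 on
 * success and non-zero on failure.
 */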
static __inline int
vmptrld(struct vmcs *vmcs)
{
	int error;
	uint64_t addr;

	addr = vtophys(vmcs);
	__asm __volatile("vmptrld %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");
	return (error);
}

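/*
 * Write 'val' to the field of the current VMCS identified by the
 * encoding 'reg'; returns 0 on success and non-zero on failure.
 */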
static __inline int
vmwrite(uint64_t reg, uint64_t val)
{
	int error;

	__asm __volatile("vmwrite %[val], %[reg];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [val] "r" (val), [reg] "r" (reg)
			 : "memory");

	return (error);
}

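/*
 * Read the field of the current VMCS identified by the encoding 'r' into
 * *addr; returns 0 on success and non-zero on failure.
 */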
static __inline int
vmread(uint64_t r, uint64_t *addr)
{
	int error;

	__asm __volatile("vmread %[r], %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error), [addr] "=m" (*addr)
			 : [r] "r" (r)
			 : "memory");
	return (error);
}

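/*
 * VMPTRLD() and VMCLEAR() bracket a critical section: VMPTRLD() calls
 * critical_enter() before making the VMCS current, and VMCLEAR() calls
 * critical_exit() after flushing it, so the thread cannot be preempted
 * or migrated while a VMCS is current on this CPU.  Both panic on
 * failure.
 */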
static __inline void
VMCLEAR(struct vmcs *vmcs)
{
	int err;

	err = vmclear(vmcs);
	if (err != 0)
		panic("%s: vmclear(%p) error %d", __func__, vmcs, err);

	critical_exit();
}

static __inline void
VMPTRLD(struct vmcs *vmcs)
{
	int err;

	critical_enter();

	err = vmptrld(vmcs);
	if (err != 0)
		panic("%s: vmptrld(%p) error %d", __func__, vmcs, err);
}
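
/*
 * A minimal usage sketch (hypothetical caller; VMCS_GUEST_RIP is a field
 * encoding from vmcs.h and is assumed here purely for illustration):
 *
 *	VMPTRLD(vmcs);				// vmcs current, in critical section
 *	error = vmwrite(VMCS_GUEST_RIP, rip);	// update a guest-state field
 *	VMCLEAR(vmcs);				// flush vmcs, leave critical section
 */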

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof(struct invvpid_desc) == 16);

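/*
 * Invalidate TLB mappings tagged with a VPID.  The INVVPID_TYPE_*
 * constants above select the scope of the invalidation; panics on
 * failure.
 */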
static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	__asm __volatile("invvpid %[desc], %[type];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [desc] "m" (desc), [type] "r" (type)
			 : "memory");

	if (error)
		panic("invvpid error %d", error);
}

#define	INVEPT_TYPE_SINGLE_CONTEXT	1UL
#define	INVEPT_TYPE_ALL_CONTEXTS	2UL
struct invept_desc {
	uint64_t	eptp;
	uint64_t	_res;
};
CTASSERT(sizeof(struct invept_desc) == 16);

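/*
 * Invalidate cached translations derived from EPT, scoped by the
 * INVEPT_TYPE_* constants above; panics on failure.
 */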
static __inline void
invept(uint64_t type, struct invept_desc desc)
{
	int error;

	__asm __volatile("invept %[desc], %[type];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [desc] "m" (desc), [type] "r" (type)
			 : "memory");

	if (error)
		panic("invept error %d", error);
}
#endif /* _VMX_CPUFUNC_H_ */