/*
 * Copyright (c) 2018-2021 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mman.h>

#include "../nvmm.h"
#include "../nvmm_internal.h"
#include "nvmm_x86.h"

int vmx_vmlaunch(uint64_t *gprs);
int vmx_vmresume(uint64_t *gprs);
void vmx_resume_rip(void);

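/*
 * Operand descriptors for the INVEPT and INVVPID instructions, as laid
 * out in the Intel SDM: 128-bit, memory-resident structures.
 */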
struct ept_desc {
	uint64_t eptp;
	uint64_t mbz;
} __packed;

struct vpid_desc {
	uint64_t vpid;
	uint64_t addr;
} __packed;

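/*
 * VMX instructions report failure through the flags: ZF set means
 * VMfailValid, CF set means VMfailInvalid. The wrappers below branch to
 * the vmx_insn_failvalid/vmx_insn_failinvalid stubs, which are defined
 * elsewhere in the VMX backend and panic with a diagnostic.
 */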
static inline void
vmx_vmxon(paddr_t *pa)
{
	__asm volatile (
		"vmxon %[pa];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_vmxoff(void)
{
	__asm volatile (
		"vmxoff;"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		:
		: "memory", "cc"
	);
}

static inline void
vmx_invept(uint64_t op, struct ept_desc *desc)
{
	__asm volatile (
		"invept %[desc],%[op];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline void
vmx_invvpid(uint64_t op, struct vpid_desc *desc)
{
	__asm volatile (
		"invvpid %[desc],%[op];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [desc] "m" (*desc), [op] "r" (op)
		: "memory", "cc"
	);
}

static inline uint64_t
vmx_vmread(uint64_t field)
{
	uint64_t value;

	__asm volatile (
		"vmread %[field],%[value];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		: [value] "=r" (value)
		: [field] "r" (field)
		: "cc"
	);

	return value;
}

static inline void
vmx_vmwrite(uint64_t field, uint64_t value)
{
	__asm volatile (
		"vmwrite %[value],%[field];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [field] "r" (field), [value] "r" (value)
		: "cc"
	);
}

static inline paddr_t __unused
vmx_vmptrst(void)
{
	paddr_t pa;

	__asm volatile (
		"vmptrst %[pa];"
		:
		: [pa] "m" (*(paddr_t *)&pa)
		: "memory"
	);

	return pa;
}

static inline void
vmx_vmptrld(paddr_t *pa)
{
	__asm volatile (
		"vmptrld %[pa];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_vmclear(paddr_t *pa)
{
	__asm volatile (
		"vmclear %[pa];"
		"jz vmx_insn_failvalid;"
		"jc vmx_insn_failinvalid;"
		:
		: [pa] "m" (*pa)
		: "memory", "cc"
	);
}

static inline void
vmx_cli(void)
{
	__asm volatile ("cli" ::: "memory");
}

static inline void
vmx_sti(void)
{
	__asm volatile ("sti" ::: "memory");
}

#define MSR_IA32_FEATURE_CONTROL	0x003A
#define	IA32_FEATURE_CONTROL_LOCK	__BIT(0)
#define	IA32_FEATURE_CONTROL_IN_SMX	__BIT(1)
#define	IA32_FEATURE_CONTROL_OUT_SMX	__BIT(2)

#define MSR_IA32_VMX_BASIC	0x0480
#define	IA32_VMX_BASIC_IDENT	__BITS(30,0)
#define	IA32_VMX_BASIC_DATA_SIZE	__BITS(44,32)
#define	IA32_VMX_BASIC_MEM_WIDTH	__BIT(48)
#define	IA32_VMX_BASIC_DUAL	__BIT(49)
#define	IA32_VMX_BASIC_MEM_TYPE	__BITS(53,50)
#define		MEM_TYPE_UC	0
#define		MEM_TYPE_WB	6
#define	IA32_VMX_BASIC_IO_REPORT	__BIT(54)
#define	IA32_VMX_BASIC_TRUE_CTLS	__BIT(55)

#define MSR_IA32_VMX_PINBASED_CTLS	0x0481
#define MSR_IA32_VMX_PROCBASED_CTLS	0x0482
#define MSR_IA32_VMX_EXIT_CTLS	0x0483
#define MSR_IA32_VMX_ENTRY_CTLS	0x0484
#define MSR_IA32_VMX_PROCBASED_CTLS2	0x048B

#define MSR_IA32_VMX_TRUE_PINBASED_CTLS	0x048D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS	0x048E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS	0x048F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS	0x0490

#define MSR_IA32_VMX_CR0_FIXED0	0x0486
#define MSR_IA32_VMX_CR0_FIXED1	0x0487
#define MSR_IA32_VMX_CR4_FIXED0	0x0488
#define MSR_IA32_VMX_CR4_FIXED1	0x0489

#define MSR_IA32_VMX_EPT_VPID_CAP	0x048C
#define	IA32_VMX_EPT_VPID_XO	__BIT(0)
#define	IA32_VMX_EPT_VPID_WALKLENGTH_4	__BIT(6)
#define	IA32_VMX_EPT_VPID_UC	__BIT(8)
#define	IA32_VMX_EPT_VPID_WB	__BIT(14)
#define	IA32_VMX_EPT_VPID_2MB	__BIT(16)
#define	IA32_VMX_EPT_VPID_1GB	__BIT(17)
#define	IA32_VMX_EPT_VPID_INVEPT	__BIT(20)
#define	IA32_VMX_EPT_VPID_FLAGS_AD	__BIT(21)
#define	IA32_VMX_EPT_VPID_ADVANCED_VMEXIT_INFO	__BIT(22)
#define	IA32_VMX_EPT_VPID_SHSTK	__BIT(23)
#define	IA32_VMX_EPT_VPID_INVEPT_CONTEXT	__BIT(25)
#define	IA32_VMX_EPT_VPID_INVEPT_ALL	__BIT(26)
#define	IA32_VMX_EPT_VPID_INVVPID	__BIT(32)
#define	IA32_VMX_EPT_VPID_INVVPID_ADDR	__BIT(40)
#define	IA32_VMX_EPT_VPID_INVVPID_CONTEXT	__BIT(41)
#define	IA32_VMX_EPT_VPID_INVVPID_ALL	__BIT(42)
#define	IA32_VMX_EPT_VPID_INVVPID_CONTEXT_NOG	__BIT(43)

/* -------------------------------------------------------------------------- */

/* 16-bit control fields */
#define VMCS_VPID	0x00000000
#define VMCS_PIR_VECTOR	0x00000002
#define VMCS_EPTP_INDEX	0x00000004
/* 16-bit guest-state fields */
#define VMCS_GUEST_ES_SELECTOR	0x00000800
#define VMCS_GUEST_CS_SELECTOR	0x00000802
#define VMCS_GUEST_SS_SELECTOR	0x00000804
#define VMCS_GUEST_DS_SELECTOR	0x00000806
#define VMCS_GUEST_FS_SELECTOR	0x00000808
#define VMCS_GUEST_GS_SELECTOR	0x0000080A
#define VMCS_GUEST_LDTR_SELECTOR	0x0000080C
#define VMCS_GUEST_TR_SELECTOR	0x0000080E
#define VMCS_GUEST_INTR_STATUS	0x00000810
#define VMCS_PML_INDEX	0x00000812
/* 16-bit host-state fields */
#define VMCS_HOST_ES_SELECTOR	0x00000C00
#define VMCS_HOST_CS_SELECTOR	0x00000C02
#define VMCS_HOST_SS_SELECTOR	0x00000C04
#define VMCS_HOST_DS_SELECTOR	0x00000C06
#define VMCS_HOST_FS_SELECTOR	0x00000C08
#define VMCS_HOST_GS_SELECTOR	0x00000C0A
#define VMCS_HOST_TR_SELECTOR	0x00000C0C
/* 64-bit control fields */
#define VMCS_IO_BITMAP_A	0x00002000
#define VMCS_IO_BITMAP_B	0x00002002
#define VMCS_MSR_BITMAP	0x00002004
#define VMCS_EXIT_MSR_STORE_ADDRESS	0x00002006
#define VMCS_EXIT_MSR_LOAD_ADDRESS	0x00002008
#define VMCS_ENTRY_MSR_LOAD_ADDRESS	0x0000200A
#define VMCS_EXECUTIVE_VMCS	0x0000200C
#define VMCS_PML_ADDRESS	0x0000200E
#define VMCS_TSC_OFFSET	0x00002010
#define VMCS_VIRTUAL_APIC	0x00002012
#define VMCS_APIC_ACCESS	0x00002014
#define VMCS_PIR_DESC	0x00002016
#define VMCS_VM_CONTROL	0x00002018
#define VMCS_EPTP	0x0000201A
#define	EPTP_TYPE	__BITS(2,0)
#define		EPTP_TYPE_UC	0
#define		EPTP_TYPE_WB	6
#define	EPTP_WALKLEN	__BITS(5,3)
#define	EPTP_FLAGS_AD	__BIT(6)
#define	EPTP_SSS	__BIT(7)
#define	EPTP_PHYSADDR	__BITS(63,12)
#define VMCS_EOI_EXIT0	0x0000201C
#define VMCS_EOI_EXIT1	0x0000201E
#define VMCS_EOI_EXIT2	0x00002020
#define VMCS_EOI_EXIT3	0x00002022
#define VMCS_EPTP_LIST	0x00002024
#define VMCS_VMREAD_BITMAP	0x00002026
#define VMCS_VMWRITE_BITMAP	0x00002028
#define VMCS_VIRTUAL_EXCEPTION	0x0000202A
#define VMCS_XSS_EXIT_BITMAP	0x0000202C
#define VMCS_ENCLS_EXIT_BITMAP	0x0000202E
#define VMCS_SUBPAGE_PERM_TABLE_PTR	0x00002030
#define VMCS_TSC_MULTIPLIER	0x00002032
#define VMCS_ENCLV_EXIT_BITMAP	0x00002036
/* 64-bit read-only fields */
#define VMCS_GUEST_PHYSICAL_ADDRESS	0x00002400
/* 64-bit guest-state fields */
#define VMCS_LINK_POINTER	0x00002800
#define VMCS_GUEST_IA32_DEBUGCTL	0x00002802
#define VMCS_GUEST_IA32_PAT	0x00002804
#define VMCS_GUEST_IA32_EFER	0x00002806
#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL	0x00002808
#define VMCS_GUEST_PDPTE0	0x0000280A
#define VMCS_GUEST_PDPTE1	0x0000280C
#define VMCS_GUEST_PDPTE2	0x0000280E
#define VMCS_GUEST_PDPTE3	0x00002810
#define VMCS_GUEST_BNDCFGS	0x00002812
#define VMCS_GUEST_RTIT_CTL	0x00002814
#define VMCS_GUEST_PKRS	0x00002818
/* 64-bit host-state fields */
#define VMCS_HOST_IA32_PAT	0x00002C00
#define VMCS_HOST_IA32_EFER	0x00002C02
#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL	0x00002C04
#define VMCS_HOST_IA32_PKRS	0x00002C06
/* 32-bit control fields */
#define VMCS_PINBASED_CTLS	0x00004000
#define	PIN_CTLS_INT_EXITING	__BIT(0)
#define	PIN_CTLS_NMI_EXITING	__BIT(3)
#define	PIN_CTLS_VIRTUAL_NMIS	__BIT(5)
#define	PIN_CTLS_ACTIVATE_PREEMPT_TIMER	__BIT(6)
#define	PIN_CTLS_PROCESS_POSTED_INTS	__BIT(7)
#define VMCS_PROCBASED_CTLS	0x00004002
#define	PROC_CTLS_INT_WINDOW_EXITING	__BIT(2)
#define	PROC_CTLS_USE_TSC_OFFSETTING	__BIT(3)
#define	PROC_CTLS_HLT_EXITING	__BIT(7)
#define	PROC_CTLS_INVLPG_EXITING	__BIT(9)
#define	PROC_CTLS_MWAIT_EXITING	__BIT(10)
#define	PROC_CTLS_RDPMC_EXITING	__BIT(11)
#define	PROC_CTLS_RDTSC_EXITING	__BIT(12)
#define	PROC_CTLS_RCR3_EXITING	__BIT(15)
#define	PROC_CTLS_LCR3_EXITING	__BIT(16)
#define	PROC_CTLS_RCR8_EXITING	__BIT(19)
#define	PROC_CTLS_LCR8_EXITING	__BIT(20)
#define	PROC_CTLS_USE_TPR_SHADOW	__BIT(21)
#define	PROC_CTLS_NMI_WINDOW_EXITING	__BIT(22)
#define	PROC_CTLS_DR_EXITING	__BIT(23)
#define	PROC_CTLS_UNCOND_IO_EXITING	__BIT(24)
#define	PROC_CTLS_USE_IO_BITMAPS	__BIT(25)
#define	PROC_CTLS_MONITOR_TRAP_FLAG	__BIT(27)
#define	PROC_CTLS_USE_MSR_BITMAPS	__BIT(28)
#define	PROC_CTLS_MONITOR_EXITING	__BIT(29)
#define	PROC_CTLS_PAUSE_EXITING	__BIT(30)
#define	PROC_CTLS_ACTIVATE_CTLS2	__BIT(31)
#define VMCS_EXCEPTION_BITMAP	0x00004004
#define VMCS_PF_ERROR_MASK	0x00004006
#define VMCS_PF_ERROR_MATCH	0x00004008
#define VMCS_CR3_TARGET_COUNT	0x0000400A
#define VMCS_EXIT_CTLS	0x0000400C
#define	EXIT_CTLS_SAVE_DEBUG_CONTROLS	__BIT(2)
#define	EXIT_CTLS_HOST_LONG_MODE	__BIT(9)
#define	EXIT_CTLS_LOAD_PERFGLOBALCTRL	__BIT(12)
#define	EXIT_CTLS_ACK_INTERRUPT	__BIT(15)
#define	EXIT_CTLS_SAVE_PAT	__BIT(18)
#define	EXIT_CTLS_LOAD_PAT	__BIT(19)
#define	EXIT_CTLS_SAVE_EFER	__BIT(20)
#define	EXIT_CTLS_LOAD_EFER	__BIT(21)
#define	EXIT_CTLS_SAVE_PREEMPT_TIMER	__BIT(22)
#define	EXIT_CTLS_CLEAR_BNDCFGS	__BIT(23)
#define	EXIT_CTLS_CONCEAL_PT	__BIT(24)
#define	EXIT_CTLS_CLEAR_RTIT_CTL	__BIT(25)
#define	EXIT_CTLS_LOAD_CET	__BIT(28)
#define	EXIT_CTLS_LOAD_PKRS	__BIT(29)
#define VMCS_EXIT_MSR_STORE_COUNT	0x0000400E
#define VMCS_EXIT_MSR_LOAD_COUNT	0x00004010
#define VMCS_ENTRY_CTLS	0x00004012
#define	ENTRY_CTLS_LOAD_DEBUG_CONTROLS	__BIT(2)
#define	ENTRY_CTLS_LONG_MODE	__BIT(9)
#define	ENTRY_CTLS_SMM	__BIT(10)
#define	ENTRY_CTLS_DISABLE_DUAL	__BIT(11)
#define	ENTRY_CTLS_LOAD_PERFGLOBALCTRL	__BIT(13)
#define	ENTRY_CTLS_LOAD_PAT	__BIT(14)
#define	ENTRY_CTLS_LOAD_EFER	__BIT(15)
#define	ENTRY_CTLS_LOAD_BNDCFGS	__BIT(16)
#define	ENTRY_CTLS_CONCEAL_PT	__BIT(17)
#define	ENTRY_CTLS_LOAD_RTIT_CTL	__BIT(18)
#define	ENTRY_CTLS_LOAD_CET	__BIT(20)
#define	ENTRY_CTLS_LOAD_PKRS	__BIT(22)
#define VMCS_ENTRY_MSR_LOAD_COUNT	0x00004014
#define VMCS_ENTRY_INTR_INFO	0x00004016
#define	INTR_INFO_VECTOR	__BITS(7,0)
#define	INTR_INFO_TYPE	__BITS(10,8)
#define		INTR_TYPE_EXT_INT	0
#define		INTR_TYPE_NMI	2
#define		INTR_TYPE_HW_EXC	3
#define		INTR_TYPE_SW_INT	4
#define		INTR_TYPE_PRIV_SW_EXC	5
#define		INTR_TYPE_SW_EXC	6
#define		INTR_TYPE_OTHER	7
#define	INTR_INFO_ERROR	__BIT(11)
#define	INTR_INFO_VALID	__BIT(31)
#define VMCS_ENTRY_EXCEPTION_ERROR	0x00004018
#define VMCS_ENTRY_INSTRUCTION_LENGTH	0x0000401A
#define VMCS_TPR_THRESHOLD	0x0000401C
#define VMCS_PROCBASED_CTLS2	0x0000401E
#define	PROC_CTLS2_VIRT_APIC_ACCESSES	__BIT(0)
#define	PROC_CTLS2_ENABLE_EPT	__BIT(1)
#define	PROC_CTLS2_DESC_TABLE_EXITING	__BIT(2)
#define	PROC_CTLS2_ENABLE_RDTSCP	__BIT(3)
#define	PROC_CTLS2_VIRT_X2APIC	__BIT(4)
#define	PROC_CTLS2_ENABLE_VPID	__BIT(5)
#define	PROC_CTLS2_WBINVD_EXITING	__BIT(6)
#define	PROC_CTLS2_UNRESTRICTED_GUEST	__BIT(7)
#define	PROC_CTLS2_APIC_REG_VIRT	__BIT(8)
#define	PROC_CTLS2_VIRT_INT_DELIVERY	__BIT(9)
#define	PROC_CTLS2_PAUSE_LOOP_EXITING	__BIT(10)
#define	PROC_CTLS2_RDRAND_EXITING	__BIT(11)
#define	PROC_CTLS2_INVPCID_ENABLE	__BIT(12)
#define	PROC_CTLS2_VMFUNC_ENABLE	__BIT(13)
#define	PROC_CTLS2_VMCS_SHADOWING	__BIT(14)
#define	PROC_CTLS2_ENCLS_EXITING	__BIT(15)
#define	PROC_CTLS2_RDSEED_EXITING	__BIT(16)
#define	PROC_CTLS2_PML_ENABLE	__BIT(17)
#define	PROC_CTLS2_EPT_VIOLATION	__BIT(18)
#define	PROC_CTLS2_CONCEAL_VMX_FROM_PT	__BIT(19)
#define	PROC_CTLS2_XSAVES_ENABLE	__BIT(20)
#define	PROC_CTLS2_MODE_BASED_EXEC_EPT	__BIT(22)
#define	PROC_CTLS2_SUBPAGE_PERMISSIONS	__BIT(23)
#define	PROC_CTLS2_PT_USES_GPA	__BIT(24)
#define	PROC_CTLS2_USE_TSC_SCALING	__BIT(25)
#define	PROC_CTLS2_WAIT_PAUSE_ENABLE	__BIT(26)
#define	PROC_CTLS2_ENCLV_EXITING	__BIT(28)
#define VMCS_PLE_GAP	0x00004020
#define VMCS_PLE_WINDOW	0x00004022
/* 32-bit read-only data fields */
#define VMCS_INSTRUCTION_ERROR	0x00004400
#define VMCS_EXIT_REASON	0x00004402
#define VMCS_EXIT_INTR_INFO	0x00004404
#define VMCS_EXIT_INTR_ERRCODE	0x00004406
#define VMCS_IDT_VECTORING_INFO	0x00004408
#define VMCS_IDT_VECTORING_ERROR	0x0000440A
#define VMCS_EXIT_INSTRUCTION_LENGTH	0x0000440C
#define VMCS_EXIT_INSTRUCTION_INFO	0x0000440E
/* 32-bit guest-state fields */
#define VMCS_GUEST_ES_LIMIT	0x00004800
#define VMCS_GUEST_CS_LIMIT	0x00004802
#define VMCS_GUEST_SS_LIMIT	0x00004804
#define VMCS_GUEST_DS_LIMIT	0x00004806
#define VMCS_GUEST_FS_LIMIT	0x00004808
#define VMCS_GUEST_GS_LIMIT	0x0000480A
#define VMCS_GUEST_LDTR_LIMIT	0x0000480C
#define VMCS_GUEST_TR_LIMIT	0x0000480E
#define VMCS_GUEST_GDTR_LIMIT	0x00004810
#define VMCS_GUEST_IDTR_LIMIT	0x00004812
#define VMCS_GUEST_ES_ACCESS_RIGHTS	0x00004814
#define VMCS_GUEST_CS_ACCESS_RIGHTS	0x00004816
#define VMCS_GUEST_SS_ACCESS_RIGHTS	0x00004818
#define VMCS_GUEST_DS_ACCESS_RIGHTS	0x0000481A
#define VMCS_GUEST_FS_ACCESS_RIGHTS	0x0000481C
#define VMCS_GUEST_GS_ACCESS_RIGHTS	0x0000481E
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS	0x00004820
#define VMCS_GUEST_TR_ACCESS_RIGHTS	0x00004822
#define VMCS_GUEST_INTERRUPTIBILITY	0x00004824
#define	INT_STATE_STI	__BIT(0)
#define	INT_STATE_MOVSS	__BIT(1)
#define	INT_STATE_SMI	__BIT(2)
#define	INT_STATE_NMI	__BIT(3)
#define	INT_STATE_ENCLAVE	__BIT(4)
#define VMCS_GUEST_ACTIVITY	0x00004826
#define VMCS_GUEST_SMBASE	0x00004828
#define VMCS_GUEST_IA32_SYSENTER_CS	0x0000482A
#define VMCS_PREEMPTION_TIMER_VALUE	0x0000482E
/* 32-bit host state fields */
#define VMCS_HOST_IA32_SYSENTER_CS	0x00004C00
/* Natural-Width control fields */
#define VMCS_CR0_MASK	0x00006000
#define VMCS_CR4_MASK	0x00006002
#define VMCS_CR0_SHADOW	0x00006004
#define VMCS_CR4_SHADOW	0x00006006
#define VMCS_CR3_TARGET0	0x00006008
#define VMCS_CR3_TARGET1	0x0000600A
#define VMCS_CR3_TARGET2	0x0000600C
#define VMCS_CR3_TARGET3	0x0000600E
/* Natural-Width read-only fields */
#define VMCS_EXIT_QUALIFICATION	0x00006400
#define VMCS_IO_RCX	0x00006402
#define VMCS_IO_RSI	0x00006404
#define VMCS_IO_RDI	0x00006406
#define VMCS_IO_RIP	0x00006408
#define VMCS_GUEST_LINEAR_ADDRESS	0x0000640A
/* Natural-Width guest-state fields */
#define VMCS_GUEST_CR0	0x00006800
#define VMCS_GUEST_CR3	0x00006802
#define VMCS_GUEST_CR4	0x00006804
#define VMCS_GUEST_ES_BASE	0x00006806
#define VMCS_GUEST_CS_BASE	0x00006808
#define VMCS_GUEST_SS_BASE	0x0000680A
#define VMCS_GUEST_DS_BASE	0x0000680C
#define VMCS_GUEST_FS_BASE	0x0000680E
#define VMCS_GUEST_GS_BASE	0x00006810
#define VMCS_GUEST_LDTR_BASE	0x00006812
#define VMCS_GUEST_TR_BASE	0x00006814
#define VMCS_GUEST_GDTR_BASE	0x00006816
#define VMCS_GUEST_IDTR_BASE	0x00006818
#define VMCS_GUEST_DR7	0x0000681A
#define VMCS_GUEST_RSP	0x0000681C
#define VMCS_GUEST_RIP	0x0000681E
#define VMCS_GUEST_RFLAGS	0x00006820
#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS	0x00006822
#define VMCS_GUEST_IA32_SYSENTER_ESP	0x00006824
#define VMCS_GUEST_IA32_SYSENTER_EIP	0x00006826
#define VMCS_GUEST_IA32_S_CET	0x00006828
#define VMCS_GUEST_SSP	0x0000682A
#define VMCS_GUEST_IA32_INTR_SSP_TABLE	0x0000682C
/* Natural-Width host-state fields */
#define VMCS_HOST_CR0	0x00006C00
#define VMCS_HOST_CR3	0x00006C02
#define VMCS_HOST_CR4	0x00006C04
#define VMCS_HOST_FS_BASE	0x00006C06
#define VMCS_HOST_GS_BASE	0x00006C08
#define VMCS_HOST_TR_BASE	0x00006C0A
#define VMCS_HOST_GDTR_BASE	0x00006C0C
#define VMCS_HOST_IDTR_BASE	0x00006C0E
#define VMCS_HOST_IA32_SYSENTER_ESP	0x00006C10
#define VMCS_HOST_IA32_SYSENTER_EIP	0x00006C12
#define VMCS_HOST_RSP	0x00006C14
#define VMCS_HOST_RIP	0x00006C16
#define VMCS_HOST_IA32_S_CET	0x00006C18
#define VMCS_HOST_SSP	0x00006C1A
#define VMCS_HOST_IA32_INTR_SSP_TABLE	0x00006C1C

/* VMX basic exit reasons. */
#define VMCS_EXITCODE_EXC_NMI	0
#define VMCS_EXITCODE_EXT_INT	1
#define VMCS_EXITCODE_SHUTDOWN	2
#define VMCS_EXITCODE_INIT	3
#define VMCS_EXITCODE_SIPI	4
#define VMCS_EXITCODE_SMI	5
#define VMCS_EXITCODE_OTHER_SMI	6
#define VMCS_EXITCODE_INT_WINDOW	7
#define VMCS_EXITCODE_NMI_WINDOW	8
#define VMCS_EXITCODE_TASK_SWITCH	9
#define VMCS_EXITCODE_CPUID	10
#define VMCS_EXITCODE_GETSEC	11
#define VMCS_EXITCODE_HLT	12
#define VMCS_EXITCODE_INVD	13
#define VMCS_EXITCODE_INVLPG	14
#define VMCS_EXITCODE_RDPMC	15
#define VMCS_EXITCODE_RDTSC	16
#define VMCS_EXITCODE_RSM	17
#define VMCS_EXITCODE_VMCALL	18
#define VMCS_EXITCODE_VMCLEAR	19
#define VMCS_EXITCODE_VMLAUNCH	20
#define VMCS_EXITCODE_VMPTRLD	21
#define VMCS_EXITCODE_VMPTRST	22
#define VMCS_EXITCODE_VMREAD	23
#define VMCS_EXITCODE_VMRESUME	24
#define VMCS_EXITCODE_VMWRITE	25
#define VMCS_EXITCODE_VMXOFF	26
#define VMCS_EXITCODE_VMXON	27
#define VMCS_EXITCODE_CR	28
#define VMCS_EXITCODE_DR	29
#define VMCS_EXITCODE_IO	30
#define VMCS_EXITCODE_RDMSR	31
#define VMCS_EXITCODE_WRMSR	32
#define VMCS_EXITCODE_FAIL_GUEST_INVALID	33
#define VMCS_EXITCODE_FAIL_MSR_INVALID	34
#define VMCS_EXITCODE_MWAIT	36
#define VMCS_EXITCODE_TRAP_FLAG	37
#define VMCS_EXITCODE_MONITOR	39
#define VMCS_EXITCODE_PAUSE	40
#define VMCS_EXITCODE_FAIL_MACHINE_CHECK	41
#define VMCS_EXITCODE_TPR_BELOW	43
#define VMCS_EXITCODE_APIC_ACCESS	44
#define VMCS_EXITCODE_VEOI	45
#define VMCS_EXITCODE_GDTR_IDTR	46
#define VMCS_EXITCODE_LDTR_TR	47
#define VMCS_EXITCODE_EPT_VIOLATION	48
#define VMCS_EXITCODE_EPT_MISCONFIG	49
#define VMCS_EXITCODE_INVEPT	50
#define VMCS_EXITCODE_RDTSCP	51
#define VMCS_EXITCODE_PREEMPT_TIMEOUT	52
#define VMCS_EXITCODE_INVVPID	53
#define VMCS_EXITCODE_WBINVD	54
#define VMCS_EXITCODE_XSETBV	55
#define VMCS_EXITCODE_APIC_WRITE	56
#define VMCS_EXITCODE_RDRAND	57
#define VMCS_EXITCODE_INVPCID	58
#define VMCS_EXITCODE_VMFUNC	59
#define VMCS_EXITCODE_ENCLS	60
#define VMCS_EXITCODE_RDSEED	61
#define VMCS_EXITCODE_PAGE_LOG_FULL	62
#define VMCS_EXITCODE_XSAVES	63
#define VMCS_EXITCODE_XRSTORS	64
#define VMCS_EXITCODE_SPP	66
#define VMCS_EXITCODE_UMWAIT	67
#define VMCS_EXITCODE_TPAUSE	68

/* -------------------------------------------------------------------------- */

static void vmx_vcpu_state_provide(struct nvmm_cpu *, uint64_t);
static void vmx_vcpu_state_commit(struct nvmm_cpu *);

/*
 * These host values are static, they do not change at runtime and are the same
 * on all CPUs. We save them here because they are not saved in the VMCS.
 */
static struct {
	uint64_t xcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
} vmx_global_hstate __cacheline_aligned;

#define VMX_MSRLIST_STAR	0
#define VMX_MSRLIST_LSTAR	1
#define VMX_MSRLIST_CSTAR	2
#define VMX_MSRLIST_SFMASK	3
#define VMX_MSRLIST_KERNELGSBASE	4
#define VMX_MSRLIST_EXIT_NMSR	5
#define VMX_MSRLIST_L1DFLUSH	5

/* On entry, we may do +1 to include L1DFLUSH. */
static size_t vmx_msrlist_entry_nmsr __read_mostly = VMX_MSRLIST_EXIT_NMSR;

struct vmxon {
	uint32_t ident;
#define VMXON_IDENT_REVISION	__BITS(30,0)

	uint8_t data[PAGE_SIZE - 4];
} __packed;

CTASSERT(sizeof(struct vmxon) == PAGE_SIZE);

struct vmxoncpu {
	vaddr_t va;
	paddr_t pa;
};

static struct vmxoncpu vmxoncpu[OS_MAXCPUS];

struct vmcs {
	uint32_t ident;
#define VMCS_IDENT_REVISION	__BITS(30,0)
#define VMCS_IDENT_SHADOW	__BIT(31)

	uint32_t abort;
	uint8_t data[PAGE_SIZE - 8];
} __packed;

CTASSERT(sizeof(struct vmcs) == PAGE_SIZE);

struct msr_entry {
	uint32_t msr;
	uint32_t rsvd;
	uint64_t val;
} __packed;

#define VPID_MAX	0xFFFF

/* Make sure we never run out of VPIDs. */
CTASSERT(VPID_MAX-1 >= NVMM_MAX_MACHINES * NVMM_MAX_VCPUS);

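/*
 * TLB-invalidation scopes and EPTP attributes, selected during backend
 * initialization from the IA32_VMX_EPT_VPID_CAP capability MSR.
 */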
static uint64_t vmx_tlb_flush_op __read_mostly;
static uint64_t vmx_ept_flush_op __read_mostly;
static uint64_t vmx_eptp_type __read_mostly;
static bool vmx_ept_has_ad __read_mostly;

static uint64_t vmx_pinbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls __read_mostly;
static uint64_t vmx_procbased_ctls2 __read_mostly;
static uint64_t vmx_entry_ctls __read_mostly;
static uint64_t vmx_exit_ctls __read_mostly;

static uint64_t vmx_cr0_fixed0 __read_mostly;
static uint64_t vmx_cr0_fixed1 __read_mostly;
static uint64_t vmx_cr4_fixed0 __read_mostly;
static uint64_t vmx_cr4_fixed1 __read_mostly;

#define VMX_PINBASED_CTLS_ONE	\
	(PIN_CTLS_INT_EXITING| \
	 PIN_CTLS_NMI_EXITING| \
	 PIN_CTLS_VIRTUAL_NMIS)

#define VMX_PINBASED_CTLS_ZERO	0

#define VMX_PROCBASED_CTLS_ONE	\
	(PROC_CTLS_USE_TSC_OFFSETTING| \
	 PROC_CTLS_HLT_EXITING| \
	 PROC_CTLS_MWAIT_EXITING | \
	 PROC_CTLS_RDPMC_EXITING | \
	 PROC_CTLS_RCR8_EXITING | \
	 PROC_CTLS_LCR8_EXITING | \
	 PROC_CTLS_UNCOND_IO_EXITING | /* no I/O bitmap */ \
	 PROC_CTLS_USE_MSR_BITMAPS | \
	 PROC_CTLS_MONITOR_EXITING | \
	 PROC_CTLS_ACTIVATE_CTLS2)

#define VMX_PROCBASED_CTLS_ZERO	\
	(PROC_CTLS_RCR3_EXITING| \
	 PROC_CTLS_LCR3_EXITING)

#define VMX_PROCBASED_CTLS2_ONE	\
	(PROC_CTLS2_ENABLE_EPT| \
	 PROC_CTLS2_ENABLE_VPID| \
	 PROC_CTLS2_UNRESTRICTED_GUEST)

#define VMX_PROCBASED_CTLS2_ZERO	0

#define VMX_ENTRY_CTLS_ONE	\
	(ENTRY_CTLS_LOAD_DEBUG_CONTROLS| \
	 ENTRY_CTLS_LOAD_EFER| \
	 ENTRY_CTLS_LOAD_PAT)

#define VMX_ENTRY_CTLS_ZERO	\
	(ENTRY_CTLS_SMM| \
	 ENTRY_CTLS_DISABLE_DUAL)

#define VMX_EXIT_CTLS_ONE	\
	(EXIT_CTLS_SAVE_DEBUG_CONTROLS| \
	 EXIT_CTLS_HOST_LONG_MODE| \
	 EXIT_CTLS_SAVE_PAT| \
	 EXIT_CTLS_LOAD_PAT| \
	 EXIT_CTLS_SAVE_EFER| \
	 EXIT_CTLS_LOAD_EFER)

#define VMX_EXIT_CTLS_ZERO	0

static uint8_t *vmx_asidmap __read_mostly;
static uint32_t vmx_maxasid __read_mostly;
static os_mtx_t vmx_asidlock __cacheline_aligned;

#define VMX_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t vmx_xcr0_mask __read_mostly;

#define VMX_NCPUIDS	32

#define VMCS_NPAGES	1
#define VMCS_SIZE	(VMCS_NPAGES * PAGE_SIZE)

#define MSRBM_NPAGES	1
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define CR0_STATIC_MASK \
	(CR0_ET | CR0_NW | CR0_CD)

#define CR4_VALID \
	(CR4_VME | \
	 CR4_PVI | \
	 CR4_TSD | \
	 CR4_DE | \
	 CR4_PSE | \
	 CR4_PAE | \
	 CR4_MCE | \
	 CR4_PGE | \
	 CR4_PCE | \
	 CR4_OSFXSR | \
	 CR4_OSXMMEXCPT | \
	 CR4_UMIP | \
	 /* CR4_LA57 excluded */ \
	 /* CR4_VMXE excluded */ \
	 /* CR4_SMXE excluded */ \
	 CR4_FSGSBASE | \
	 CR4_PCIDE | \
	 CR4_OSXSAVE | \
	 CR4_SMEP | \
	 CR4_SMAP \
	 /* CR4_PKE excluded */ \
	 /* CR4_CET excluded */ \
	 /* CR4_PKS excluded */)
#define CR4_INVALID \
	(0xFFFFFFFFFFFFFFFFULL & ~CR4_VALID)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PSE|CR4_PAE|CR4_PGE|CR4_PCIDE|CR4_SMEP)

/* -------------------------------------------------------------------------- */

struct vmx_machdata {
	volatile uint64_t mach_htlb_gen;
};

static const size_t vmx_vcpu_conf_sizes[NVMM_X86_VCPU_NCONF] = {
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID)] =
	    sizeof(struct nvmm_vcpu_conf_cpuid),
	[NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR)] =
	    sizeof(struct nvmm_vcpu_conf_tpr)
};

struct vmx_cpudata {
	/* General. */
	uint64_t asid;
	bool gtlb_want_flush;
	bool gtsc_want_update;
	uint64_t vcpu_htlb_gen;
	os_cpuset_t *htlb_want_flush;

	/* VMCS. */
	struct vmcs *vmcs;
	paddr_t vmcs_pa;
	size_t vmcs_refcnt;
	os_cpu_t *vmcs_cpu;
	bool vmcs_launched;

	/* MSR bitmap. */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Percpu host state, absent from VMCS. */
	struct {
		uint64_t kernelgsbase;
		uint64_t drs[NVMM_X64_NDR];
#ifdef __DragonFly__
		mcontext_t hmctx;	/* TODO: remove this like NetBSD */
#endif
	} hstate;

	/* Intr state. */
	bool int_window_exit;
	bool nmi_window_exit;
	bool evt_pending;

	/* Guest state. */
	struct msr_entry *gmsr;
	paddr_t gmsr_pa;
	uint64_t gmsr_misc_enable;
	uint64_t gcr2;
	uint64_t gcr8;
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t gtsc_offset;
	uint64_t gtsc_match;
	struct nvmm_x86_xsave gxsave __aligned(64);

	/* VCPU configuration. */
	bool cpuidpresent[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_cpuid cpuid[VMX_NCPUIDS];
	struct nvmm_vcpu_conf_tpr tpr;
};

static const struct {
	uint64_t selector;
	uint64_t attrib;
	uint64_t limit;
	uint64_t base;
} vmx_guest_segs[NVMM_X64_NSEG] = {
	[NVMM_X64_SEG_ES] = {
		VMCS_GUEST_ES_SELECTOR,
		VMCS_GUEST_ES_ACCESS_RIGHTS,
		VMCS_GUEST_ES_LIMIT,
		VMCS_GUEST_ES_BASE
	},
	[NVMM_X64_SEG_CS] = {
		VMCS_GUEST_CS_SELECTOR,
		VMCS_GUEST_CS_ACCESS_RIGHTS,
		VMCS_GUEST_CS_LIMIT,
		VMCS_GUEST_CS_BASE
	},
	[NVMM_X64_SEG_SS] = {
		VMCS_GUEST_SS_SELECTOR,
		VMCS_GUEST_SS_ACCESS_RIGHTS,
		VMCS_GUEST_SS_LIMIT,
		VMCS_GUEST_SS_BASE
	},
	[NVMM_X64_SEG_DS] = {
		VMCS_GUEST_DS_SELECTOR,
		VMCS_GUEST_DS_ACCESS_RIGHTS,
		VMCS_GUEST_DS_LIMIT,
		VMCS_GUEST_DS_BASE
	},
	[NVMM_X64_SEG_FS] = {
		VMCS_GUEST_FS_SELECTOR,
		VMCS_GUEST_FS_ACCESS_RIGHTS,
		VMCS_GUEST_FS_LIMIT,
		VMCS_GUEST_FS_BASE
	},
	[NVMM_X64_SEG_GS] = {
		VMCS_GUEST_GS_SELECTOR,
		VMCS_GUEST_GS_ACCESS_RIGHTS,
		VMCS_GUEST_GS_LIMIT,
		VMCS_GUEST_GS_BASE
	},
	[NVMM_X64_SEG_GDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_GDTR_LIMIT,
		VMCS_GUEST_GDTR_BASE
	},
	[NVMM_X64_SEG_IDT] = {
		0, /* doesn't exist */
		0, /* doesn't exist */
		VMCS_GUEST_IDTR_LIMIT,
		VMCS_GUEST_IDTR_BASE
	},
	[NVMM_X64_SEG_LDT] = {
		VMCS_GUEST_LDTR_SELECTOR,
		VMCS_GUEST_LDTR_ACCESS_RIGHTS,
		VMCS_GUEST_LDTR_LIMIT,
		VMCS_GUEST_LDTR_BASE
	},
	[NVMM_X64_SEG_TR] = {
		VMCS_GUEST_TR_SELECTOR,
		VMCS_GUEST_TR_ACCESS_RIGHTS,
		VMCS_GUEST_TR_LIMIT,
		VMCS_GUEST_TR_BASE
	}
};

/* -------------------------------------------------------------------------- */

static uint64_t
vmx_get_revision(void)
{
	uint64_t msr;

	msr = rdmsr(MSR_IA32_VMX_BASIC);
	msr &= IA32_VMX_BASIC_IDENT;

	return msr;
}

static
OS_IPI_FUNC(vmx_vmclear_ipi)
{
	paddr_t vmcs_pa = (paddr_t)arg;
	vmx_vmclear(&vmcs_pa);
}

static void
vmx_vmclear_remote(os_cpu_t *cpu, paddr_t vmcs_pa)
{
	int bound;

	OS_ASSERT(os_preempt_disabled());

	/*
	 * TODO: OSify curlwp_bind().
	 */

	bound = curlwp_bind();
	os_preempt_enable();

	os_ipi_unicast(cpu, vmx_vmclear_ipi, (void *)vmcs_pa);

	os_preempt_disable();
	curlwp_bindx(bound);
}

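/*
 * VMCS activation protocol: vmx_vmcs_enter()/vmx_vmcs_leave() are
 * refcounted, and keep preemption disabled while the VCPU's VMCS is the
 * current one on this CPU. If the VMCS was last active on a remote CPU,
 * it is flushed there with VMCLEAR (via IPI) before being loaded here
 * with VMPTRLD; clearing also forces the next run to use VMLAUNCH
 * rather than VMRESUME.
 */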
static void
vmx_vmcs_enter(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	os_cpu_t *vmcs_cpu;

	cpudata->vmcs_refcnt++;
	if (cpudata->vmcs_refcnt > 1) {
		OS_ASSERT(os_preempt_disabled());
		OS_ASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
		return;
	}

	vmcs_cpu = cpudata->vmcs_cpu;
	cpudata->vmcs_cpu = (void *)0x00FFFFFFFFFFFFFF; /* clobber */

	os_preempt_disable();

	if (vmcs_cpu == NULL) {
		/* This VMCS is loaded for the first time. */
		vmx_vmclear(&cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else if (vmcs_cpu != os_curcpu()) {
		/* This VMCS is active on a remote CPU. */
		vmx_vmclear_remote(vmcs_cpu, cpudata->vmcs_pa);
		cpudata->vmcs_launched = false;
	} else {
		/* This VMCS is active on curcpu, nothing to do. */
	}

	vmx_vmptrld(&cpudata->vmcs_pa);
}

static void
vmx_vmcs_leave(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	OS_ASSERT(os_preempt_disabled());
	OS_ASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	OS_ASSERT(cpudata->vmcs_refcnt > 0);
	cpudata->vmcs_refcnt--;

	if (cpudata->vmcs_refcnt > 0) {
		return;
	}

	cpudata->vmcs_cpu = os_curcpu();
	os_preempt_enable();
}

static void
vmx_vmcs_destroy(struct nvmm_cpu *vcpu)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;

	OS_ASSERT(os_preempt_disabled());
	OS_ASSERT(vmx_vmptrst() == cpudata->vmcs_pa);
	OS_ASSERT(cpudata->vmcs_refcnt == 1);
	cpudata->vmcs_refcnt--;

	vmx_vmclear(&cpudata->vmcs_pa);
	os_preempt_enable();
}

/* -------------------------------------------------------------------------- */

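/*
 * Interrupt/NMI window exiting: set the corresponding Processor-Based
 * control so that the CPU triggers a VM-exit as soon as the guest is
 * able to receive the pending event.
 */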
static void
vmx_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		// XXX INT_STATE_NMI?
		ctls1 |= PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = true;
	} else {
		ctls1 |= PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = true;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

static void
vmx_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t ctls1;

	ctls1 = vmx_vmread(VMCS_PROCBASED_CTLS);

	if (nmi) {
		ctls1 &= ~PROC_CTLS_NMI_WINDOW_EXITING;
		cpudata->nmi_window_exit = false;
	} else {
		ctls1 &= ~PROC_CTLS_INT_WINDOW_EXITING;
		cpudata->int_window_exit = false;
	}

	vmx_vmwrite(VMCS_PROCBASED_CTLS, ctls1);
}

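/*
 * When injecting a hardware exception we set RFLAGS.RF so that the
 * faulting instruction does not re-trigger an instruction breakpoint
 * once resumed (Intel SDM). A few vectors are exempt from this rule.
 */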
static inline bool
vmx_excp_has_rf(uint8_t vector)
{
	switch (vector) {
	case 1:		/* #DB */
	case 4:		/* #OF */
	case 8:		/* #DF */
	case 18:	/* #MC */
		return false;
	default:
		return true;
	}
}

static inline int
vmx_excp_has_error(uint8_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 21:	/* #CP */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

static int
vmx_vcpu_inject(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	int type = 0, err = 0, ret = EINVAL;
	uint64_t rflags, info, error;
	u_int evtype;
	uint8_t vector;

	evtype = comm->event.type;
	vector = comm->event.vector;
	error = comm->event.u.excp.error;
	__insn_barrier();

	vmx_vmcs_enter(vcpu);

	switch (evtype) {
	case NVMM_VCPU_EVENT_EXCP:
		if (vector == 2 || vector >= 32)
			goto out;
		if (vector == 3 || vector == 0)
			goto out;
		if (vmx_excp_has_rf(vector)) {
			rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
			vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags | PSL_RF);
		}
		type = INTR_TYPE_HW_EXC;
		err = vmx_excp_has_error(vector);
		break;
	case NVMM_VCPU_EVENT_INTR:
		type = INTR_TYPE_EXT_INT;
		if (vector == 2) {
			type = INTR_TYPE_NMI;
			vmx_event_waitexit_enable(vcpu, true);
		}
		err = 0;
		break;
	default:
		goto out;
	}

	info =
	    __SHIFTIN((uint64_t)vector, INTR_INFO_VECTOR) |
	    __SHIFTIN((uint64_t)type, INTR_INFO_TYPE) |
	    __SHIFTIN((uint64_t)err, INTR_INFO_ERROR) |
	    __SHIFTIN((uint64_t)1, INTR_INFO_VALID);
	vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
	vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, error);

	cpudata->evt_pending = true;
	ret = 0;

out:
	vmx_vmcs_leave(vcpu);
	return ret;
}

static void
vmx_inject_ud(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 6;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	OS_ASSERT(ret == 0);
}

static void
vmx_inject_gp(struct nvmm_cpu *vcpu)
{
	struct nvmm_comm_page *comm = vcpu->comm;
	int ret __diagused;

	comm->event.type = NVMM_VCPU_EVENT_EXCP;
	comm->event.vector = 13;
	comm->event.u.excp.error = 0;

	ret = vmx_vcpu_inject(vcpu);
	OS_ASSERT(ret == 0);
}

static inline int
vmx_vcpu_event_commit(struct nvmm_cpu *vcpu)
{
	if (__predict_true(!vcpu->comm->event_commit)) {
		return 0;
	}
	vcpu->comm->event_commit = false;
	return vmx_vcpu_inject(vcpu);
}

static inline void
vmx_inkernel_advance(void)
{
	uint64_t rip, inslen, intstate, rflags;

	/*
	 * Maybe we should also apply single-stepping and debug exceptions.
	 * Matters for guest-ring3, because it can execute 'cpuid' under a
	 * debugger.
	 */

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	vmx_vmwrite(VMCS_GUEST_RIP, rip + inslen);

	rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
	vmx_vmwrite(VMCS_GUEST_RFLAGS, rflags & ~PSL_RF);

	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
	vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY,
	    intstate & ~(INT_STATE_STI|INT_STATE_MOVSS));
}

static void
vmx_exit_invalid(struct nvmm_vcpu_exit *exit, uint64_t code)
{
	exit->u.inv.hwcode = code;
	exit->reason = NVMM_VCPU_EXIT_INVALID;
}

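/*
 * With PIN_CTLS_NMI_EXITING set, a host NMI received in guest mode
 * causes a VM-exit with this reason. We return to the host with no
 * guest exit to report; the host then handles the NMI itself.
 */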
static void
vmx_exit_exc_nmi(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	uint64_t qual;

	qual = vmx_vmread(VMCS_EXIT_INTR_INFO);

	if ((qual & INTR_INFO_VALID) == 0) {
		goto error;
	}
	if (__SHIFTOUT(qual, INTR_INFO_TYPE) != INTR_TYPE_NMI) {
		goto error;
	}

	exit->reason = NVMM_VCPU_EXIT_NONE;
	return;

error:
	vmx_exit_invalid(exit, VMCS_EXITCODE_EXC_NMI);
}

#define VMX_CPUID_MAX_BASIC	0x16
#define VMX_CPUID_MAX_HYPERVISOR	0x40000000
#define VMX_CPUID_MAX_EXTENDED	0x80000008
static uint32_t vmx_cpuid_max_basic __read_mostly;
static uint32_t vmx_cpuid_max_extended __read_mostly;

static void
vmx_inkernel_exec_cpuid(struct vmx_cpudata *cpudata, uint32_t eax, uint32_t ecx)
{
	cpuid_desc_t descs;

	x86_get_cpuid2(eax, ecx, &descs);
	cpudata->gprs[NVMM_X64_GPR_RAX] = descs.eax;
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs.ebx;
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs.ecx;
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs.edx;
}

static void
vmx_inkernel_handle_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    uint32_t eax, uint32_t ecx)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	unsigned int ncpus;
	uint32_t clevel;
	uint64_t cr4;

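	/*
	 * Clamp out-of-range leaves: like real hardware, a request beyond
	 * the highest supported leaf of its range returns the data of the
	 * highest basic leaf instead.
	 */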
	if (eax < 0x40000000) {
		if (__predict_false(eax > vmx_cpuid_max_basic)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else if (eax < 0x80000000) {
		if (__predict_false(eax > VMX_CPUID_MAX_HYPERVISOR)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	} else {
		if (__predict_false(eax > vmx_cpuid_max_extended)) {
			eax = vmx_cpuid_max_basic;
			vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
		}
	}

	switch (eax) {
	case 0x00000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_basic;
		break;
	case 0x00000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_00000001.eax;

		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_0_01_EBX_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_0_01_EBX_LOCAL_APIC_ID);

		ncpus = os_atomic_load_uint(&mach->ncpus);
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_0_01_EBX_HTT_CORES;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(ncpus,
		    CPUID_0_01_EBX_HTT_CORES);

		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID_0_01_ECX_RAZ;
		if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
			cpudata->gprs[NVMM_X64_GPR_RCX] |= CPUID_0_01_ECX_PCID;
		}

		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000001.edx;

		/* CPUID_0_01_ECX_OSXSAVE depends on CR4. */
		cr4 = vmx_vmread(VMCS_GUEST_CR4);
		if (!(cr4 & CR4_OSXSAVE)) {
			cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID_0_01_ECX_OSXSAVE;
		}
		break;
	case 0x00000002:
		break;
	case 0x00000003:
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000004: /* Deterministic Cache Parameters */
		ncpus = os_atomic_load_uint(&mach->ncpus);
		clevel = __SHIFTOUT(cpudata->gprs[NVMM_X64_GPR_RAX],
		    CPUID_0_04_EAX_CACHELEVEL);

		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~CPUID_0_04_EAX_SHARING;
		if (clevel >= 3) {
			/* L3 and above: all CPUs. */
			cpudata->gprs[NVMM_X64_GPR_RAX] |=
			    __SHIFTIN(ncpus - 1, CPUID_0_04_EAX_SHARING);
		} else {
			/* L2 and below: one LP per CPU. */
			cpudata->gprs[NVMM_X64_GPR_RAX] |=
			    __SHIFTIN(0, CPUID_0_04_EAX_SHARING);
		}

		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~CPUID_0_04_EAX_CORE_P_PKG;
		cpudata->gprs[NVMM_X64_GPR_RAX] |=
		    __SHIFTIN(ncpus - 1, CPUID_0_04_EAX_CORE_P_PKG);
		break;
	case 0x00000005: /* MONITOR/MWAIT */
	case 0x00000006: /* Thermal and Power Management */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000007: /* Structured Extended Feature Flags Enumeration */
		switch (ecx) {
		case 0:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_00000007.ebx;
			cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_00000007.ecx;
			cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_00000007.edx;
			if (vmx_procbased_ctls2 & PROC_CTLS2_INVPCID_ENABLE) {
				cpudata->gprs[NVMM_X64_GPR_RBX] |= CPUID_0_07_EBX_INVPCID;
			}
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x00000008: /* Empty */
	case 0x00000009: /* Direct Cache Access Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000A: /* Architectural Performance Monitoring */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000B: /* Extended Topology Enumeration */
		switch (ecx) {
		case 0: /* Threads */
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_0_0B_ECX_LVLNUM) |
			    __SHIFTIN(CPUID_0_0B_ECX_LVLTYPE_SMT, CPUID_0_0B_ECX_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		case 1: /* Cores */
			ncpus = os_atomic_load_uint(&mach->ncpus);
			cpudata->gprs[NVMM_X64_GPR_RAX] = ilog2(ncpus);
			cpudata->gprs[NVMM_X64_GPR_RBX] = ncpus;
			cpudata->gprs[NVMM_X64_GPR_RCX] =
			    __SHIFTIN(ecx, CPUID_0_0B_ECX_LVLNUM) |
			    __SHIFTIN(CPUID_0_0B_ECX_LVLTYPE_CORE, CPUID_0_0B_ECX_LVLTYPE);
			cpudata->gprs[NVMM_X64_GPR_RDX] = vcpu->cpuid;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0; /* LVLTYPE_INVAL */
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000C: /* Empty */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x0000000D: /* Processor Extended State Enumeration */
		if (vmx_xcr0_mask == 0) {
			break;
		}
		switch (ecx) {
		case 0:
			/* Supported XCR0 bits. */
			cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_xcr0_mask & 0xFFFFFFFF;
			cpudata->gprs[NVMM_X64_GPR_RDX] = vmx_xcr0_mask >> 32;
			/* XSAVE size for currently enabled XCR0 features. */
			cpudata->gprs[NVMM_X64_GPR_RBX] = nvmm_x86_xsave_size(cpudata->gxcr0);
			/* XSAVE size for all supported XCR0 features. */
			cpudata->gprs[NVMM_X64_GPR_RCX] = nvmm_x86_xsave_size(vmx_xcr0_mask);
			break;
		case 1:
			cpudata->gprs[NVMM_X64_GPR_RAX] &=
			    (CPUID_0_0D_ECX1_EAX_XSAVEOPT |
			     CPUID_0_0D_ECX1_EAX_XSAVEC |
			     CPUID_0_0D_ECX1_EAX_XGETBV);
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		default:
			cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
			cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
			break;
		}
		break;
	case 0x0000000E: /* Empty */
	case 0x0000000F: /* Intel RDT Monitoring Enumeration */
	case 0x00000010: /* Intel RDT Allocation Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000011: /* Empty */
	case 0x00000012: /* Intel SGX Capability Enumeration */
	case 0x00000013: /* Empty */
	case 0x00000014: /* Intel Processor Trace Enumeration */
		cpudata->gprs[NVMM_X64_GPR_RAX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		break;
	case 0x00000015: /* TSC and Nominal Core Crystal Clock Information */
	case 0x00000016: /* Processor Frequency Information */
		break;

	case 0x40000000: /* Hypervisor Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] = VMX_CPUID_MAX_HYPERVISOR;
		cpudata->gprs[NVMM_X64_GPR_RBX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RCX] = 0;
		cpudata->gprs[NVMM_X64_GPR_RDX] = 0;
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;

	case 0x80000000:
		cpudata->gprs[NVMM_X64_GPR_RAX] = vmx_cpuid_max_extended;
		break;
	case 0x80000001:
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000001.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000001.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000001.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000001.edx;
		break;
	case 0x80000002: /* Processor Brand String */
	case 0x80000003: /* Processor Brand String */
	case 0x80000004: /* Processor Brand String */
	case 0x80000005: /* Reserved Zero */
	case 0x80000006: /* Cache Information */
		break;
	case 0x80000007: /* TSC Information */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000007.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000007.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000007.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000007.edx;
		break;
	case 0x80000008: /* Address Sizes */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= nvmm_cpuid_80000008.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= nvmm_cpuid_80000008.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= nvmm_cpuid_80000008.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= nvmm_cpuid_80000008.edx;
		break;

	default:
		break;
	}
}

static void
vmx_exit_insn(struct nvmm_vcpu_exit *exit, uint64_t reason)
{
	uint64_t inslen, rip;

	inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
	rip = vmx_vmread(VMCS_GUEST_RIP);
	exit->u.insn.npc = rip + inslen;
	exit->reason = reason;
}

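/*
 * CPUID exits: emulate the leaf in-kernel first, then apply any
 * user-configured mask for that leaf, or forward the exit to userland
 * when the VCPU configuration requests it.
 */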
static void
vmx_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_vcpu_conf_cpuid *cpuid;
	uint32_t eax, ecx;
	size_t i;

	eax = (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
	ecx = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
	vmx_inkernel_exec_cpuid(cpudata, eax, ecx);
	vmx_inkernel_handle_cpuid(mach, vcpu, eax, ecx);

	for (i = 0; i < VMX_NCPUIDS; i++) {
		if (!cpudata->cpuidpresent[i]) {
			continue;
		}
		cpuid = &cpudata->cpuid[i];
		if (cpuid->leaf != eax) {
			continue;
		}

		if (cpuid->exit) {
			vmx_exit_insn(exit, NVMM_VCPU_EXIT_CPUID);
			return;
		}
		OS_ASSERT(cpuid->mask);

		/* del */
		cpudata->gprs[NVMM_X64_GPR_RAX] &= ~cpuid->u.mask.del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->u.mask.del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->u.mask.del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->u.mask.del.edx;

		/* set */
		cpudata->gprs[NVMM_X64_GPR_RAX] |= cpuid->u.mask.set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->u.mask.set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->u.mask.set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->u.mask.set.edx;

		break;
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_NONE;
}

static void
vmx_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmx_cpudata *cpudata = vcpu->cpudata;
	uint64_t rflags;

	if (cpudata->int_window_exit) {
		rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
		if (rflags & PSL_I) {
			vmx_event_waitexit_disable(vcpu, false);
		}
	}

	vmx_inkernel_advance();
	exit->reason = NVMM_VCPU_EXIT_HALTED;
}

#define VMX_QUAL_CR_NUM	__BITS(3,0)
#define VMX_QUAL_CR_TYPE	__BITS(5,4)
#define		CR_TYPE_WRITE	0
#define		CR_TYPE_READ	1
#define		CR_TYPE_CLTS	2
#define		CR_TYPE_LMSW	3
#define VMX_QUAL_CR_LMSW_OPMEM	__BIT(6)
#define VMX_QUAL_CR_GPR	__BITS(11,8)
#define VMX_QUAL_CR_LMSW_SRC	__BITS(31,16)

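/*
 * Validate a CR value against the IA32_VMX_CRn_FIXED{0,1} MSR pair:
 * bits set in FIXED0 must be 1, bits clear in FIXED1 must be 0.
 */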
1590 static inline int
vmx_check_cr(uint64_t crval,uint64_t fixed0,uint64_t fixed1)1591 vmx_check_cr(uint64_t crval, uint64_t fixed0, uint64_t fixed1)
1592 {
1593 /* Bits set to 1 in fixed0 are fixed to 1. */
1594 if ((crval & fixed0) != fixed0) {
1595 return -1;
1596 }
1597 /* Bits set to 0 in fixed1 are fixed to 0. */
1598 if (crval & ~fixed1) {
1599 return -1;
1600 }
1601 return 0;
1602 }
1603
1604 static int
vmx_inkernel_handle_cr0(struct nvmm_machine * mach,struct nvmm_cpu * vcpu,uint64_t qual)1605 vmx_inkernel_handle_cr0(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1606 uint64_t qual)
1607 {
1608 struct vmx_cpudata *cpudata = vcpu->cpudata;
1609 uint64_t type, gpr, oldcr0, realcr0, fakecr0;
1610 uint64_t efer, ctls1;
1611
1612 type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
1613 if (type != CR_TYPE_WRITE) {
1614 return -1;
1615 }
1616
1617 gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
1618 OS_ASSERT(gpr < 16);
1619
1620 if (gpr == NVMM_X64_GPR_RSP) {
1621 fakecr0 = vmx_vmread(VMCS_GUEST_RSP);
1622 } else {
1623 fakecr0 = cpudata->gprs[gpr];
1624 }
1625
1626 /*
1627 * fakecr0 is the value the guest believes is in %cr0. realcr0 is the
1628 * actual value in %cr0.
1629 *
1630 * In fakecr0 we must force CR0_ET to 1.
1631 *
1632 * In realcr0 we must force CR0_NW and CR0_CD to 0, and CR0_ET and
1633 * CR0_NE to 1.
1634 */
1635 fakecr0 |= CR0_ET;
1636 realcr0 = (fakecr0 & ~CR0_STATIC_MASK) | CR0_ET | CR0_NE;
1637
1638 if (vmx_check_cr(realcr0, vmx_cr0_fixed0, vmx_cr0_fixed1) == -1) {
1639 return -1;
1640 }
1641
1642 /*
1643 * XXX Handle 32bit PAE paging, need to set PDPTEs, fetched manually
1644 * from CR3.
1645 */
1646
1647 if (realcr0 & CR0_PG) {
1648 ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
1649 efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
1650 if (efer & EFER_LME) {
1651 ctls1 |= ENTRY_CTLS_LONG_MODE;
1652 efer |= EFER_LMA;
1653 } else {
1654 ctls1 &= ~ENTRY_CTLS_LONG_MODE;
1655 efer &= ~EFER_LMA;
1656 }
1657 vmx_vmwrite(VMCS_GUEST_IA32_EFER, efer);
1658 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
1659 }
1660
1661 oldcr0 = (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
1662 (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
1663 if ((oldcr0 ^ fakecr0) & CR0_TLB_FLUSH) {
1664 cpudata->gtlb_want_flush = true;
1665 }
1666
1667 vmx_vmwrite(VMCS_CR0_SHADOW, fakecr0);
1668 vmx_vmwrite(VMCS_GUEST_CR0, realcr0);
1669 vmx_inkernel_advance();
1670 return 0;
1671 }
1672
1673 static int
vmx_inkernel_handle_cr4(struct nvmm_machine * mach,struct nvmm_cpu * vcpu,uint64_t qual)1674 vmx_inkernel_handle_cr4(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1675 uint64_t qual)
1676 {
1677 struct vmx_cpudata *cpudata = vcpu->cpudata;
1678 uint64_t type, gpr, oldcr4, cr4;
1679
1680 type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
1681 if (type != CR_TYPE_WRITE) {
1682 return -1;
1683 }
1684
1685 gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
1686 OS_ASSERT(gpr < 16);
1687
1688 if (gpr == NVMM_X64_GPR_RSP) {
1689 gpr = vmx_vmread(VMCS_GUEST_RSP);
1690 } else {
1691 gpr = cpudata->gprs[gpr];
1692 }
1693
1694 if (gpr & CR4_INVALID) {
1695 return -1;
1696 }
1697 cr4 = gpr | CR4_VMXE;
1698 if (vmx_check_cr(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1) == -1) {
1699 return -1;
1700 }
1701
1702 oldcr4 = vmx_vmread(VMCS_GUEST_CR4);
1703 if ((oldcr4 ^ gpr) & CR4_TLB_FLUSH) {
1704 cpudata->gtlb_want_flush = true;
1705 }
1706
1707 vmx_vmwrite(VMCS_GUEST_CR4, cr4);
1708 vmx_inkernel_advance();
1709 return 0;
1710 }
1711
1712 static int
vmx_inkernel_handle_cr8(struct nvmm_machine * mach,struct nvmm_cpu * vcpu,uint64_t qual,struct nvmm_vcpu_exit * exit)1713 vmx_inkernel_handle_cr8(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1714 uint64_t qual, struct nvmm_vcpu_exit *exit)
1715 {
1716 struct vmx_cpudata *cpudata = vcpu->cpudata;
1717 uint64_t type, gpr;
1718 bool write;
1719
1720 type = __SHIFTOUT(qual, VMX_QUAL_CR_TYPE);
1721 if (type == CR_TYPE_WRITE) {
1722 write = true;
1723 } else if (type == CR_TYPE_READ) {
1724 write = false;
1725 } else {
1726 return -1;
1727 }
1728
1729 gpr = __SHIFTOUT(qual, VMX_QUAL_CR_GPR);
1730 OS_ASSERT(gpr < 16);
1731
1732 if (write) {
1733 if (gpr == NVMM_X64_GPR_RSP) {
1734 cpudata->gcr8 = vmx_vmread(VMCS_GUEST_RSP);
1735 } else {
1736 cpudata->gcr8 = cpudata->gprs[gpr];
1737 }
1738 if (cpudata->tpr.exit_changed) {
1739 exit->reason = NVMM_VCPU_EXIT_TPR_CHANGED;
1740 }
1741 } else {
1742 if (gpr == NVMM_X64_GPR_RSP) {
1743 vmx_vmwrite(VMCS_GUEST_RSP, cpudata->gcr8);
1744 } else {
1745 cpudata->gprs[gpr] = cpudata->gcr8;
1746 }
1747 }
1748
1749 vmx_inkernel_advance();
1750 return 0;
1751 }
1752
1753 static void
vmx_exit_cr(struct nvmm_machine * mach,struct nvmm_cpu * vcpu,struct nvmm_vcpu_exit * exit)1754 vmx_exit_cr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1755 struct nvmm_vcpu_exit *exit)
1756 {
1757 uint64_t qual;
1758 int ret;
1759
1760 exit->reason = NVMM_VCPU_EXIT_NONE;
1761
1762 qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
1763
1764 switch (__SHIFTOUT(qual, VMX_QUAL_CR_NUM)) {
1765 case 0:
1766 ret = vmx_inkernel_handle_cr0(mach, vcpu, qual);
1767 break;
1768 case 4:
1769 ret = vmx_inkernel_handle_cr4(mach, vcpu, qual);
1770 break;
1771 case 8:
1772 ret = vmx_inkernel_handle_cr8(mach, vcpu, qual, exit);
1773 break;
1774 default:
1775 ret = -1;
1776 break;
1777 }
1778
1779 if (ret == -1) {
1780 vmx_inject_gp(vcpu);
1781 }
1782 }
1783
1784 #define VMX_QUAL_IO_SIZE __BITS(2,0)
1785 #define IO_SIZE_8 0
1786 #define IO_SIZE_16 1
1787 #define IO_SIZE_32 3
1788 #define VMX_QUAL_IO_IN __BIT(3)
1789 #define VMX_QUAL_IO_STR __BIT(4)
1790 #define VMX_QUAL_IO_REP __BIT(5)
1791 #define VMX_QUAL_IO_DX __BIT(6)
1792 #define VMX_QUAL_IO_PORT __BITS(31,16)
1793
1794 #define VMX_INFO_IO_ADRSIZE __BITS(9,7)
1795 #define IO_ADRSIZE_16 0
1796 #define IO_ADRSIZE_32 1
1797 #define IO_ADRSIZE_64 2
1798 #define VMX_INFO_IO_SEG __BITS(17,15)
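/*
 * Decode sketch, using the fields above: a "rep outsw %ds:(%rsi),%dx"
 * exit would carry IO_SIZE_16 in VMX_QUAL_IO_SIZE, VMX_QUAL_IO_STR and
 * VMX_QUAL_IO_REP set, VMX_QUAL_IO_IN clear, and the port number taken
 * from %dx in VMX_QUAL_IO_PORT.
 */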
1799
1800 static void
1801 vmx_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1802 struct nvmm_vcpu_exit *exit)
1803 {
1804 uint64_t qual, info, inslen, rip;
1805
1806 qual = vmx_vmread(VMCS_EXIT_QUALIFICATION);
1807 info = vmx_vmread(VMCS_EXIT_INSTRUCTION_INFO);
1808
1809 exit->reason = NVMM_VCPU_EXIT_IO;
1810
1811 exit->u.io.in = (qual & VMX_QUAL_IO_IN) != 0;
1812 exit->u.io.port = __SHIFTOUT(qual, VMX_QUAL_IO_PORT);
1813
1814 OS_ASSERT(__SHIFTOUT(info, VMX_INFO_IO_SEG) < 6);
1815 exit->u.io.seg = __SHIFTOUT(info, VMX_INFO_IO_SEG);
1816
1817 if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_64) {
1818 exit->u.io.address_size = 8;
1819 } else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_32) {
1820 exit->u.io.address_size = 4;
1821 } else if (__SHIFTOUT(info, VMX_INFO_IO_ADRSIZE) == IO_ADRSIZE_16) {
1822 exit->u.io.address_size = 2;
1823 }
1824
1825 if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_32) {
1826 exit->u.io.operand_size = 4;
1827 } else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_16) {
1828 exit->u.io.operand_size = 2;
1829 } else if (__SHIFTOUT(qual, VMX_QUAL_IO_SIZE) == IO_SIZE_8) {
1830 exit->u.io.operand_size = 1;
1831 }
1832
1833 exit->u.io.rep = (qual & VMX_QUAL_IO_REP) != 0;
1834 exit->u.io.str = (qual & VMX_QUAL_IO_STR) != 0;
1835
1836 if (exit->u.io.in && exit->u.io.str) {
1837 exit->u.io.seg = NVMM_X64_SEG_ES;
1838 }
1839
1840 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1841 rip = vmx_vmread(VMCS_GUEST_RIP);
1842 exit->u.io.npc = rip + inslen;
1843
1844 vmx_vcpu_state_provide(vcpu,
1845 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
1846 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
1847 }
1848
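/* Reads of these ignored MSRs return 0; writes to them are discarded. */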
1849 static const uint64_t msr_ignore_list[] = {
1850 MSR_BIOS_SIGN,
1851 MSR_IA32_PLATFORM_ID
1852 };
1853
1854 static bool
1855 vmx_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1856 struct nvmm_vcpu_exit *exit)
1857 {
1858 struct vmx_cpudata *cpudata = vcpu->cpudata;
1859 uint64_t val;
1860 size_t i;
1861
1862 if (exit->reason == NVMM_VCPU_EXIT_RDMSR) {
1863 if (exit->u.rdmsr.msr == MSR_CR_PAT) {
1864 val = vmx_vmread(VMCS_GUEST_IA32_PAT);
1865 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1866 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1867 goto handled;
1868 }
1869 if (exit->u.rdmsr.msr == MSR_MISC_ENABLE) {
1870 val = cpudata->gmsr_misc_enable;
1871 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1872 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1873 goto handled;
1874 }
1875 if (exit->u.rdmsr.msr == MSR_IA32_ARCH_CAPABILITIES) {
1876 cpuid_desc_t descs;
1877 x86_get_cpuid(0x00000000, &descs);
1878 if (descs.eax < 7) {
1879 goto error;
1880 }
1881 x86_get_cpuid(0x00000007, &descs);
1882 if (!(descs.edx & CPUID_0_07_EDX_ARCH_CAP)) {
1883 goto error;
1884 }
1885 val = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
1886 val &= (IA32_ARCH_RDCL_NO |
1887 IA32_ARCH_SSB_NO |
1888 IA32_ARCH_MDS_NO |
1889 IA32_ARCH_TAA_NO);
1890 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1891 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1892 goto handled;
1893 }
1894 for (i = 0; i < __arraycount(msr_ignore_list); i++) {
1895 if (msr_ignore_list[i] != exit->u.rdmsr.msr)
1896 continue;
1897 val = 0;
1898 cpudata->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
1899 cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1900 goto handled;
1901 }
1902 } else {
1903 if (exit->u.wrmsr.msr == MSR_TSC) {
1904 cpudata->gtsc_offset = exit->u.wrmsr.val - rdtsc();
1905 cpudata->gtsc_want_update = true;
1906 goto handled;
1907 }
1908 if (exit->u.wrmsr.msr == MSR_CR_PAT) {
1909 val = exit->u.wrmsr.val;
1910 if (__predict_false(!nvmm_x86_pat_validate(val))) {
1911 goto error;
1912 }
1913 vmx_vmwrite(VMCS_GUEST_IA32_PAT, val);
1914 goto handled;
1915 }
1916 if (exit->u.wrmsr.msr == MSR_MISC_ENABLE) {
1917 /* Don't care. */
1918 goto handled;
1919 }
1920 for (i = 0; i < __arraycount(msr_ignore_list); i++) {
1921 if (msr_ignore_list[i] != exit->u.wrmsr.msr)
1922 continue;
1923 goto handled;
1924 }
1925 }
1926
1927 return false;
1928
1929 handled:
1930 vmx_inkernel_advance();
1931 return true;
1932
1933 error:
1934 vmx_inject_gp(vcpu);
1935 return true;
1936 }
1937
1938 static void
1939 vmx_exit_rdmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1940 struct nvmm_vcpu_exit *exit)
1941 {
1942 struct vmx_cpudata *cpudata = vcpu->cpudata;
1943 uint64_t inslen, rip;
1944
1945 exit->reason = NVMM_VCPU_EXIT_RDMSR;
1946 exit->u.rdmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
1947
1948 if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
1949 exit->reason = NVMM_VCPU_EXIT_NONE;
1950 return;
1951 }
1952
1953 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1954 rip = vmx_vmread(VMCS_GUEST_RIP);
1955 exit->u.rdmsr.npc = rip + inslen;
1956
1957 vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
1958 }
1959
1960 static void
1961 vmx_exit_wrmsr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1962 struct nvmm_vcpu_exit *exit)
1963 {
1964 struct vmx_cpudata *cpudata = vcpu->cpudata;
1965 uint64_t rdx, rax, inslen, rip;
1966
1967 rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
1968 rax = cpudata->gprs[NVMM_X64_GPR_RAX];
1969
1970 exit->reason = NVMM_VCPU_EXIT_WRMSR;
1971 exit->u.wrmsr.msr = (cpudata->gprs[NVMM_X64_GPR_RCX] & 0xFFFFFFFF);
1972 exit->u.wrmsr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
1973
1974 if (vmx_inkernel_handle_msr(mach, vcpu, exit)) {
1975 exit->reason = NVMM_VCPU_EXIT_NONE;
1976 return;
1977 }
1978
1979 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
1980 rip = vmx_vmread(VMCS_GUEST_RIP);
1981 exit->u.wrmsr.npc = rip + inslen;
1982
1983 vmx_vcpu_state_provide(vcpu, NVMM_X64_STATE_GPRS);
1984 }
1985
1986 static void
1987 vmx_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
1988 struct nvmm_vcpu_exit *exit)
1989 {
1990 struct vmx_cpudata *cpudata = vcpu->cpudata;
1991 uint64_t val;
1992
1993 exit->reason = NVMM_VCPU_EXIT_NONE;
1994
1995 val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
1996 (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
1997
1998 if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
1999 goto error;
2000 } else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
2001 goto error;
2002 } else if (__predict_false((val & XCR0_X87) == 0)) {
2003 goto error;
2004 }
2005
2006 cpudata->gxcr0 = val;
2007
2008 vmx_inkernel_advance();
2009 return;
2010
2011 error:
2012 vmx_inject_gp(vcpu);
2013 }
2014
2015 #define VMX_EPT_VIOLATION_READ __BIT(0)
2016 #define VMX_EPT_VIOLATION_WRITE __BIT(1)
2017 #define VMX_EPT_VIOLATION_EXECUTE __BIT(2)
2018
2019 static void
2020 vmx_exit_epf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
2021 struct nvmm_vcpu_exit *exit)
2022 {
2023 uint64_t perm;
2024 gpaddr_t gpa;
2025
2026 gpa = vmx_vmread(VMCS_GUEST_PHYSICAL_ADDRESS);
2027
2028 exit->reason = NVMM_VCPU_EXIT_MEMORY;
2029 perm = vmx_vmread(VMCS_EXIT_QUALIFICATION);
2030 if (perm & VMX_EPT_VIOLATION_WRITE)
2031 exit->u.mem.prot = PROT_WRITE;
2032 else if (perm & VMX_EPT_VIOLATION_EXECUTE)
2033 exit->u.mem.prot = PROT_EXEC;
2034 else
2035 exit->u.mem.prot = PROT_READ;
2036 exit->u.mem.gpa = gpa;
2037 exit->u.mem.inst_len = 0;
2038
2039 vmx_vcpu_state_provide(vcpu,
2040 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
2041 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
2042 }
2043
2044 /* -------------------------------------------------------------------------- */
2045
2046 static void
2047 vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
2048 {
2049 struct vmx_cpudata *cpudata = vcpu->cpudata;
2050
2051 #if defined(__NetBSD__)
2052 x86_curthread_save_fpu();
2053 #elif defined(__DragonFly__)
2054 /*
2055 	 * NOTE: Whether the host FPU state is live depends on whether the
2056 	 * user program has used the FPU; npxpush()/npxpop() handle both cases.
2057 */
2058 npxpush(&cpudata->hstate.hmctx);
2059 #endif
2060
2061 x86_restore_fpu(&cpudata->gxsave, vmx_xcr0_mask);
2062 if (vmx_xcr0_mask != 0) {
2063 x86_set_xcr(0, cpudata->gxcr0);
2064 }
2065 }
2066
2067 static void
2068 vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
2069 {
2070 struct vmx_cpudata *cpudata = vcpu->cpudata;
2071
2072 if (vmx_xcr0_mask != 0) {
2073 x86_set_xcr(0, vmx_global_hstate.xcr0);
2074 }
2075 x86_save_fpu(&cpudata->gxsave, vmx_xcr0_mask);
2076
2077 #if defined(__NetBSD__)
2078 x86_curthread_restore_fpu();
2079 #elif defined(__DragonFly__)
2080 npxpop(&cpudata->hstate.hmctx);
2081 #endif
2082 }
2083
2084 static void
2085 vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
2086 {
2087 struct vmx_cpudata *cpudata = vcpu->cpudata;
2088
2089 x86_curthread_save_dbregs(cpudata->hstate.drs);
2090
2091 x86_set_dr7(0);
2092
2093 x86_set_dr0(cpudata->drs[NVMM_X64_DR_DR0]);
2094 x86_set_dr1(cpudata->drs[NVMM_X64_DR_DR1]);
2095 x86_set_dr2(cpudata->drs[NVMM_X64_DR_DR2]);
2096 x86_set_dr3(cpudata->drs[NVMM_X64_DR_DR3]);
2097 x86_set_dr6(cpudata->drs[NVMM_X64_DR_DR6]);
2098 }
2099
2100 static void
2101 vmx_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
2102 {
2103 struct vmx_cpudata *cpudata = vcpu->cpudata;
2104
2105 cpudata->drs[NVMM_X64_DR_DR0] = x86_get_dr0();
2106 cpudata->drs[NVMM_X64_DR_DR1] = x86_get_dr1();
2107 cpudata->drs[NVMM_X64_DR_DR2] = x86_get_dr2();
2108 cpudata->drs[NVMM_X64_DR_DR3] = x86_get_dr3();
2109 cpudata->drs[NVMM_X64_DR_DR6] = x86_get_dr6();
2110
2111 x86_curthread_restore_dbregs(cpudata->hstate.drs);
2112 }
2113
2114 static void
2115 vmx_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
2116 {
2117 struct vmx_cpudata *cpudata = vcpu->cpudata;
2118
2119 /* This gets restored automatically by the CPU. */
2120 vmx_vmwrite(VMCS_HOST_IDTR_BASE, (uint64_t)os_curcpu_idt());
2121 vmx_vmwrite(VMCS_HOST_FS_BASE, rdmsr(MSR_FSBASE));
2122 vmx_vmwrite(VMCS_HOST_CR3, x86_get_cr3());
2123 vmx_vmwrite(VMCS_HOST_CR4, x86_get_cr4());
2124
2125 /* Save the percpu host state. */
2126 cpudata->hstate.kernelgsbase = rdmsr(MSR_KERNELGSBASE);
2127 }
2128
2129 static void
2130 vmx_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
2131 {
2132 struct vmx_cpudata *cpudata = vcpu->cpudata;
2133
2134 /* Restore the global host state. */
2135 wrmsr(MSR_STAR, vmx_global_hstate.star);
2136 wrmsr(MSR_LSTAR, vmx_global_hstate.lstar);
2137 wrmsr(MSR_CSTAR, vmx_global_hstate.cstar);
2138 wrmsr(MSR_SFMASK, vmx_global_hstate.sfmask);
2139
2140 /* Restore the percpu host state. */
2141 wrmsr(MSR_KERNELGSBASE, cpudata->hstate.kernelgsbase);
2142 }
2143
2144 /* -------------------------------------------------------------------------- */
2145
2146 #define VMX_INVVPID_ADDRESS 0
2147 #define VMX_INVVPID_CONTEXT 1
2148 #define VMX_INVVPID_ALL 2
2149 #define VMX_INVVPID_CONTEXT_NOGLOBAL 3
2150
2151 #define VMX_INVEPT_CONTEXT 1
2152 #define VMX_INVEPT_ALL 2
2153
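/*
 * Guest TLB entries are tagged with our VPID. If the VCPU migrated to
 * a different host CPU, that CPU's TLB may still hold stale entries
 * for this VPID from an earlier run, so request a guest TLB flush.
 */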
2154 static inline void
2155 vmx_gtlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2156 {
2157 struct vmx_cpudata *cpudata = vcpu->cpudata;
2158
2159 if (vcpu->hcpu_last != hcpu) {
2160 cpudata->gtlb_want_flush = true;
2161 }
2162 }
2163
2164 static inline void
2165 vmx_htlb_catchup(struct nvmm_cpu *vcpu, int hcpu)
2166 {
2167 struct vmx_cpudata *cpudata = vcpu->cpudata;
2168 struct ept_desc ept_desc;
2169
2170 if (__predict_true(!os_cpuset_isset(cpudata->htlb_want_flush, hcpu))) {
2171 return;
2172 }
2173
2174 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2175 ept_desc.mbz = 0;
2176 vmx_invept(vmx_ept_flush_op, &ept_desc);
2177 os_cpuset_clear(cpudata->htlb_want_flush, hcpu);
2178 }
2179
2180 static inline uint64_t
2181 vmx_htlb_flush(struct nvmm_machine *mach, struct vmx_cpudata *cpudata)
2182 {
2183 struct ept_desc ept_desc;
2184 uint64_t machgen;
2185
2186 #if defined(__NetBSD__)
2187 machgen = ((struct vmx_machdata *)mach->machdata)->mach_htlb_gen;
2188 #elif defined(__DragonFly__)
2189 clear_xinvltlb();
2190 machgen = vmspace_pmap(mach->vm)->pm_invgen;
2191 #endif
2192 if (__predict_true(machgen == cpudata->vcpu_htlb_gen)) {
2193 return machgen;
2194 }
2195
2196 os_cpuset_setrunning(cpudata->htlb_want_flush);
2197
2198 ept_desc.eptp = vmx_vmread(VMCS_EPTP);
2199 ept_desc.mbz = 0;
2200 vmx_invept(vmx_ept_flush_op, &ept_desc);
2201
2202 return machgen;
2203 }
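/*
 * Sketch of the hTLB generation protocol: the machine-wide generation
 * is bumped whenever the host modifies the EPT tables. A VCPU that
 * observes a newer generation flags every CPU for a flush, executes
 * INVEPT locally, and acknowledges for itself via vmx_htlb_flush_ack();
 * the other CPUs catch up lazily in vmx_htlb_catchup() above.
 */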
2204
2205 static inline void
2206 vmx_htlb_flush_ack(struct vmx_cpudata *cpudata, uint64_t machgen)
2207 {
2208 cpudata->vcpu_htlb_gen = machgen;
2209 os_cpuset_clear(cpudata->htlb_want_flush, os_curcpu_number());
2210 }
2211
2212 static inline void
2213 vmx_exit_evt(struct vmx_cpudata *cpudata)
2214 {
2215 uint64_t info, err, inslen;
2216
2217 cpudata->evt_pending = false;
2218
2219 info = vmx_vmread(VMCS_IDT_VECTORING_INFO);
2220 if (__predict_true((info & INTR_INFO_VALID) == 0)) {
2221 return;
2222 }
2223 err = vmx_vmread(VMCS_IDT_VECTORING_ERROR);
2224
2225 vmx_vmwrite(VMCS_ENTRY_INTR_INFO, info);
2226 vmx_vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, err);
2227
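	/*
	 * For software interrupts and software exceptions, VM entry also
	 * needs the instruction length, so the CPU can compute the return
	 * RIP to push for the re-injected event.
	 */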
2228 switch (__SHIFTOUT(info, INTR_INFO_TYPE)) {
2229 case INTR_TYPE_SW_INT:
2230 case INTR_TYPE_PRIV_SW_EXC:
2231 case INTR_TYPE_SW_EXC:
2232 inslen = vmx_vmread(VMCS_EXIT_INSTRUCTION_LENGTH);
2233 vmx_vmwrite(VMCS_ENTRY_INSTRUCTION_LENGTH, inslen);
2234 }
2235
2236 cpudata->evt_pending = true;
2237 }
2238
2239 static int
2240 vmx_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
2241 struct nvmm_vcpu_exit *exit)
2242 {
2243 struct nvmm_comm_page *comm = vcpu->comm;
2244 struct vmx_cpudata *cpudata = vcpu->cpudata;
2245 struct vpid_desc vpid_desc;
2246 uint64_t exitcode;
2247 uint64_t intstate;
2248 uint64_t machgen;
2249 int hcpu, ret;
2250 int error = 0;
2251 bool launched;
2252
2253 vmx_vmcs_enter(vcpu);
2254
2255 vmx_vcpu_state_commit(vcpu);
2256 comm->state_cached = 0;
2257
2258 #ifndef __DragonFly__
2259 if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
2260 vmx_vmcs_leave(vcpu);
2261 return EINVAL;
2262 }
2263 #endif
2264
2265 hcpu = os_curcpu_number();
2266 launched = cpudata->vmcs_launched;
2267
2268 vmx_gtlb_catchup(vcpu, hcpu);
2269 vmx_htlb_catchup(vcpu, hcpu);
2270
2271 if (vcpu->hcpu_last != hcpu) {
2272 vmx_vmwrite(VMCS_HOST_TR_SELECTOR, os_curcpu_tss_sel());
2273 vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)os_curcpu_tss());
2274 vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)os_curcpu_gdt());
2275 vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
2276 cpudata->gtsc_want_update = true;
2277 vcpu->hcpu_last = hcpu;
2278
2279 #ifdef __DragonFly__
2280 /*
2281 * XXX: We aren't tracking overloaded CPUs (multiple vCPUs
2282 * scheduled on the same physical CPU) yet so there are
2283 * currently no calls to pmap_del_cpu().
2284 */
2285 pmap_add_cpu(mach->vm, hcpu);
2286 #endif
2287 }
2288
2289 vmx_vcpu_guest_dbregs_enter(vcpu);
2290 vmx_vcpu_guest_misc_enter(vcpu);
2291
2292 while (1) {
2293 if (cpudata->gtlb_want_flush) {
2294 vpid_desc.vpid = cpudata->asid;
2295 vpid_desc.addr = 0;
2296 vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
2297 cpudata->gtlb_want_flush = false;
2298 }
2299
2300 if (__predict_false(cpudata->gtsc_want_update)) {
2301 vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc_offset);
2302 cpudata->gtsc_want_update = false;
2303 }
2304
2305 vmx_cli();
2306 vmx_vcpu_guest_fpu_enter(vcpu);
2307 machgen = vmx_htlb_flush(mach, cpudata);
2308
2309 #ifdef __DragonFly__
2310 /*
2311 		 * Check for pending host events (e.g. interrupts, ASTs)
2312 		 * that must be serviced before we can safely enter the
2313 		 * guest. This check must be done after the cli to avoid
2314 		 * racing on gd_reqflags.
2315 *
2316 * Emulators may assume that event injection succeeds, but
2317 * we have to return to process these events. To deal with
2318 * this, use ERESTART mechanics.
2319 */
2320 if (__predict_false(mycpu->gd_reqflags & RQF_HVM_MASK)) {
2321 /* INVEPT executed, so ack hTLB flush. */
2322 vmx_htlb_flush_ack(cpudata, machgen);
2323 vmx_vcpu_guest_fpu_leave(vcpu);
2324 vmx_sti();
2325 exit->reason = NVMM_VCPU_EXIT_NONE;
2326 error = ERESTART;
2327 break;
2328 }
2329
2330 /*
2331 * Only commit event requests when we are absolutely
2332 * sure that we can issue the vmlaunch/vmresume.
2333 */
2334 if (__predict_false(vmx_vcpu_event_commit(vcpu) != 0)) {
2335 /* INVEPT executed, so ack hTLB flush. */
2336 vmx_htlb_flush_ack(cpudata, machgen);
2337 vmx_vcpu_guest_fpu_leave(vcpu);
2338 vmx_sti();
2339 exit->reason = NVMM_VCPU_EXIT_NONE;
2340 error = EINVAL;
2341 break;
2342 }
2343 #endif
2344
2345 x86_set_cr2(cpudata->gcr2);
2346 if (launched) {
2347 ret = vmx_vmresume(cpudata->gprs);
2348 } else {
2349 ret = vmx_vmlaunch(cpudata->gprs);
2350 }
2351 cpudata->gcr2 = x86_get_cr2();
2352 vmx_htlb_flush_ack(cpudata, machgen);
2353 vmx_vcpu_guest_fpu_leave(vcpu);
2354 vmx_sti();
2355
2356 if (__predict_false(ret != 0)) {
2357 vmx_exit_invalid(exit, -1);
2358 break;
2359 }
2360 vmx_exit_evt(cpudata);
2361
2362 launched = true;
2363
2364 exitcode = vmx_vmread(VMCS_EXIT_REASON);
2365 exitcode &= __BITS(15,0);
2366
2367 switch (exitcode) {
2368 case VMCS_EXITCODE_EXC_NMI:
2369 vmx_exit_exc_nmi(mach, vcpu, exit);
2370 break;
2371 case VMCS_EXITCODE_EXT_INT:
2372 exit->reason = NVMM_VCPU_EXIT_NONE;
2373 break;
2374 case VMCS_EXITCODE_CPUID:
2375 vmx_exit_cpuid(mach, vcpu, exit);
2376 break;
2377 case VMCS_EXITCODE_HLT:
2378 vmx_exit_hlt(mach, vcpu, exit);
2379 break;
2380 case VMCS_EXITCODE_CR:
2381 vmx_exit_cr(mach, vcpu, exit);
2382 break;
2383 case VMCS_EXITCODE_IO:
2384 vmx_exit_io(mach, vcpu, exit);
2385 break;
2386 case VMCS_EXITCODE_RDMSR:
2387 vmx_exit_rdmsr(mach, vcpu, exit);
2388 break;
2389 case VMCS_EXITCODE_WRMSR:
2390 vmx_exit_wrmsr(mach, vcpu, exit);
2391 break;
2392 case VMCS_EXITCODE_SHUTDOWN:
2393 exit->reason = NVMM_VCPU_EXIT_SHUTDOWN;
2394 break;
2395 case VMCS_EXITCODE_MONITOR:
2396 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MONITOR);
2397 break;
2398 case VMCS_EXITCODE_MWAIT:
2399 vmx_exit_insn(exit, NVMM_VCPU_EXIT_MWAIT);
2400 break;
2401 case VMCS_EXITCODE_XSETBV:
2402 vmx_exit_xsetbv(mach, vcpu, exit);
2403 break;
2404 case VMCS_EXITCODE_RDPMC:
2405 case VMCS_EXITCODE_RDTSCP:
2406 case VMCS_EXITCODE_INVVPID:
2407 case VMCS_EXITCODE_INVEPT:
2408 case VMCS_EXITCODE_VMCALL:
2409 case VMCS_EXITCODE_VMCLEAR:
2410 case VMCS_EXITCODE_VMLAUNCH:
2411 case VMCS_EXITCODE_VMPTRLD:
2412 case VMCS_EXITCODE_VMPTRST:
2413 case VMCS_EXITCODE_VMREAD:
2414 case VMCS_EXITCODE_VMRESUME:
2415 case VMCS_EXITCODE_VMWRITE:
2416 case VMCS_EXITCODE_VMXOFF:
2417 case VMCS_EXITCODE_VMXON:
2418 vmx_inject_ud(vcpu);
2419 exit->reason = NVMM_VCPU_EXIT_NONE;
2420 break;
2421 case VMCS_EXITCODE_EPT_VIOLATION:
2422 vmx_exit_epf(mach, vcpu, exit);
2423 break;
2424 case VMCS_EXITCODE_INT_WINDOW:
2425 vmx_event_waitexit_disable(vcpu, false);
2426 exit->reason = NVMM_VCPU_EXIT_INT_READY;
2427 break;
2428 case VMCS_EXITCODE_NMI_WINDOW:
2429 vmx_event_waitexit_disable(vcpu, true);
2430 exit->reason = NVMM_VCPU_EXIT_NMI_READY;
2431 break;
2432 default:
2433 vmx_exit_invalid(exit, exitcode);
2434 break;
2435 }
2436
2437 		/* Break out if we must return to userland; otherwise keep rolling. */
2438 if (os_return_needed()) {
2439 break;
2440 }
2441 if (exit->reason != NVMM_VCPU_EXIT_NONE) {
2442 break;
2443 }
2444 }
2445
2446 cpudata->vmcs_launched = launched;
2447
2448 vmx_vcpu_guest_misc_leave(vcpu);
2449 vmx_vcpu_guest_dbregs_leave(vcpu);
2450
2451 exit->exitstate.rflags = vmx_vmread(VMCS_GUEST_RFLAGS);
2452 exit->exitstate.cr8 = cpudata->gcr8;
2453 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2454 exit->exitstate.int_shadow =
2455 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2456 exit->exitstate.int_window_exiting = cpudata->int_window_exit;
2457 exit->exitstate.nmi_window_exiting = cpudata->nmi_window_exit;
2458 exit->exitstate.evt_pending = cpudata->evt_pending;
2459
2460 vmx_vmcs_leave(vcpu);
2461
2462 return error;
2463 }
2464
2465 /* -------------------------------------------------------------------------- */
2466
2467 static void
2468 vmx_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
2469 {
2470 uint64_t byte;
2471 uint8_t bitoff;
2472
2473 if (msr < 0x00002000) {
2474 /* Range 1 */
2475 byte = ((msr - 0x00000000) / 8) + 0;
2476 } else if (msr >= 0xC0000000 && msr < 0xC0002000) {
2477 /* Range 2 */
2478 byte = ((msr - 0xC0000000) / 8) + 1024;
2479 } else {
2480 panic("%s: wrong range", __func__);
2481 }
2482
2483 bitoff = (msr & 0x7);
2484
2485 if (read) {
2486 bitmap[byte] &= ~__BIT(bitoff);
2487 }
2488 if (write) {
2489 bitmap[2048 + byte] &= ~__BIT(bitoff);
2490 }
2491 }
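/*
 * The MSR bitmap is a 4KB page split into four 1KB areas: read-low
 * (MSRs 0x00000000-0x00001FFF), read-high (0xC0000000-0xC0001FFF),
 * then write-low and write-high. Worked example: allowing reads of
 * MSR_LSTAR (0xC0000082) clears bit 2 of byte 1024 + (0x82 / 8) = 1040,
 * in the read half of the bitmap.
 */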
2492
2493 #define VMX_SEG_ATTRIB_TYPE __BITS(3,0)
2494 #define VMX_SEG_ATTRIB_S __BIT(4)
2495 #define VMX_SEG_ATTRIB_DPL __BITS(6,5)
2496 #define VMX_SEG_ATTRIB_P __BIT(7)
2497 #define VMX_SEG_ATTRIB_AVL __BIT(12)
2498 #define VMX_SEG_ATTRIB_L __BIT(13)
2499 #define VMX_SEG_ATTRIB_DEF __BIT(14)
2500 #define VMX_SEG_ATTRIB_G __BIT(15)
2501 #define VMX_SEG_ATTRIB_UNUSABLE __BIT(16)
2502
2503 static void
2504 vmx_vcpu_setstate_seg(const struct nvmm_x64_state_seg *segs, int idx)
2505 {
2506 uint64_t attrib;
2507
2508 attrib =
2509 __SHIFTIN(segs[idx].attrib.type, VMX_SEG_ATTRIB_TYPE) |
2510 __SHIFTIN(segs[idx].attrib.s, VMX_SEG_ATTRIB_S) |
2511 __SHIFTIN(segs[idx].attrib.dpl, VMX_SEG_ATTRIB_DPL) |
2512 __SHIFTIN(segs[idx].attrib.p, VMX_SEG_ATTRIB_P) |
2513 __SHIFTIN(segs[idx].attrib.avl, VMX_SEG_ATTRIB_AVL) |
2514 __SHIFTIN(segs[idx].attrib.l, VMX_SEG_ATTRIB_L) |
2515 __SHIFTIN(segs[idx].attrib.def, VMX_SEG_ATTRIB_DEF) |
2516 __SHIFTIN(segs[idx].attrib.g, VMX_SEG_ATTRIB_G) |
2517 (!segs[idx].attrib.p ? VMX_SEG_ATTRIB_UNUSABLE : 0);
2518
2519 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2520 vmx_vmwrite(vmx_guest_segs[idx].selector, segs[idx].selector);
2521 vmx_vmwrite(vmx_guest_segs[idx].attrib, attrib);
2522 }
2523 vmx_vmwrite(vmx_guest_segs[idx].limit, segs[idx].limit);
2524 vmx_vmwrite(vmx_guest_segs[idx].base, segs[idx].base);
2525 }
2526
2527 static void
2528 vmx_vcpu_getstate_seg(struct nvmm_x64_state_seg *segs, int idx)
2529 {
2530 uint64_t selector = 0, attrib = 0, base, limit;
2531
2532 if (idx != NVMM_X64_SEG_GDT && idx != NVMM_X64_SEG_IDT) {
2533 selector = vmx_vmread(vmx_guest_segs[idx].selector);
2534 attrib = vmx_vmread(vmx_guest_segs[idx].attrib);
2535 }
2536 limit = vmx_vmread(vmx_guest_segs[idx].limit);
2537 base = vmx_vmread(vmx_guest_segs[idx].base);
2538
2539 segs[idx].selector = selector;
2540 segs[idx].limit = limit;
2541 segs[idx].base = base;
2542 segs[idx].attrib.type = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_TYPE);
2543 segs[idx].attrib.s = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_S);
2544 segs[idx].attrib.dpl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DPL);
2545 segs[idx].attrib.p = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_P);
2546 segs[idx].attrib.avl = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_AVL);
2547 segs[idx].attrib.l = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_L);
2548 segs[idx].attrib.def = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_DEF);
2549 segs[idx].attrib.g = __SHIFTOUT(attrib, VMX_SEG_ATTRIB_G);
2550 if (attrib & VMX_SEG_ATTRIB_UNUSABLE) {
2551 segs[idx].attrib.p = 0;
2552 }
2553 }
2554
2555 static inline bool
2556 vmx_state_gtlb_flush(const struct nvmm_x64_state *state, uint64_t flags)
2557 {
2558 uint64_t cr0, cr3, cr4, efer;
2559
2560 if (flags & NVMM_X64_STATE_CRS) {
2561 cr0 = vmx_vmread(VMCS_GUEST_CR0);
2562 if ((cr0 ^ state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
2563 return true;
2564 }
2565 cr3 = vmx_vmread(VMCS_GUEST_CR3);
2566 if (cr3 != state->crs[NVMM_X64_CR_CR3]) {
2567 return true;
2568 }
2569 cr4 = vmx_vmread(VMCS_GUEST_CR4);
2570 if ((cr4 ^ state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
2571 return true;
2572 }
2573 }
2574
2575 if (flags & NVMM_X64_STATE_MSRS) {
2576 efer = vmx_vmread(VMCS_GUEST_IA32_EFER);
2577 if ((efer ^
2578 state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
2579 return true;
2580 }
2581 }
2582
2583 return false;
2584 }
2585
2586 static void
2587 vmx_vcpu_setstate(struct nvmm_cpu *vcpu)
2588 {
2589 struct nvmm_comm_page *comm = vcpu->comm;
2590 const struct nvmm_x64_state *state = &comm->state;
2591 struct vmx_cpudata *cpudata = vcpu->cpudata;
2592 struct msr_entry *gmsr = cpudata->gmsr;
2593 struct nvmm_x64_state_fpu *fpustate;
2594 uint64_t ctls1, intstate;
2595 uint64_t flags;
2596
2597 flags = comm->state_wanted;
2598
2599 vmx_vmcs_enter(vcpu);
2600
2601 if (vmx_state_gtlb_flush(state, flags)) {
2602 cpudata->gtlb_want_flush = true;
2603 }
2604
2605 if (flags & NVMM_X64_STATE_SEGS) {
2606 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_CS);
2607 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_DS);
2608 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_ES);
2609 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_FS);
2610 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GS);
2611 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_SS);
2612 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_GDT);
2613 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_IDT);
2614 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_LDT);
2615 vmx_vcpu_setstate_seg(state->segs, NVMM_X64_SEG_TR);
2616 }
2617
2618 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2619 if (flags & NVMM_X64_STATE_GPRS) {
2620 memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
2621
2622 vmx_vmwrite(VMCS_GUEST_RIP, state->gprs[NVMM_X64_GPR_RIP]);
2623 vmx_vmwrite(VMCS_GUEST_RSP, state->gprs[NVMM_X64_GPR_RSP]);
2624 vmx_vmwrite(VMCS_GUEST_RFLAGS, state->gprs[NVMM_X64_GPR_RFLAGS]);
2625 }
2626
2627 if (flags & NVMM_X64_STATE_CRS) {
2628 /*
2629 * CR0_ET must be 1 both in the shadow and the real register.
2630 * CR0_NE must be 1 in the real register.
2631 * CR0_NW and CR0_CD must be 0 in the real register.
2632 */
2633 vmx_vmwrite(VMCS_CR0_SHADOW,
2634 (state->crs[NVMM_X64_CR_CR0] & CR0_STATIC_MASK) |
2635 CR0_ET);
2636 vmx_vmwrite(VMCS_GUEST_CR0,
2637 (state->crs[NVMM_X64_CR_CR0] & ~CR0_STATIC_MASK) |
2638 CR0_ET | CR0_NE);
2639
2640 cpudata->gcr2 = state->crs[NVMM_X64_CR_CR2];
2641
2642 /* XXX We are not handling PDPTE here. */
2643 vmx_vmwrite(VMCS_GUEST_CR3, state->crs[NVMM_X64_CR_CR3]);
2644
2645 /* CR4_VMXE is mandatory. */
2646 vmx_vmwrite(VMCS_GUEST_CR4,
2647 (state->crs[NVMM_X64_CR_CR4] & CR4_VALID) | CR4_VMXE);
2648
2649 cpudata->gcr8 = state->crs[NVMM_X64_CR_CR8];
2650
2651 if (vmx_xcr0_mask != 0) {
2652 /* Clear illegal XCR0 bits, set mandatory X87 bit. */
2653 cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
2654 cpudata->gxcr0 &= vmx_xcr0_mask;
2655 cpudata->gxcr0 |= XCR0_X87;
2656 }
2657 }
2658
2659 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2660 if (flags & NVMM_X64_STATE_DRS) {
2661 memcpy(cpudata->drs, state->drs, sizeof(state->drs));
2662
2663 cpudata->drs[NVMM_X64_DR_DR6] &= 0xFFFFFFFF;
2664 vmx_vmwrite(VMCS_GUEST_DR7, cpudata->drs[NVMM_X64_DR_DR7]);
2665 }
2666
2667 if (flags & NVMM_X64_STATE_MSRS) {
2668 gmsr[VMX_MSRLIST_STAR].val =
2669 state->msrs[NVMM_X64_MSR_STAR];
2670 gmsr[VMX_MSRLIST_LSTAR].val =
2671 state->msrs[NVMM_X64_MSR_LSTAR];
2672 gmsr[VMX_MSRLIST_CSTAR].val =
2673 state->msrs[NVMM_X64_MSR_CSTAR];
2674 gmsr[VMX_MSRLIST_SFMASK].val =
2675 state->msrs[NVMM_X64_MSR_SFMASK];
2676 gmsr[VMX_MSRLIST_KERNELGSBASE].val =
2677 state->msrs[NVMM_X64_MSR_KERNELGSBASE];
2678
2679 vmx_vmwrite(VMCS_GUEST_IA32_EFER,
2680 state->msrs[NVMM_X64_MSR_EFER]);
2681 vmx_vmwrite(VMCS_GUEST_IA32_PAT,
2682 state->msrs[NVMM_X64_MSR_PAT]);
2683 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_CS,
2684 state->msrs[NVMM_X64_MSR_SYSENTER_CS]);
2685 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_ESP,
2686 state->msrs[NVMM_X64_MSR_SYSENTER_ESP]);
2687 vmx_vmwrite(VMCS_GUEST_IA32_SYSENTER_EIP,
2688 state->msrs[NVMM_X64_MSR_SYSENTER_EIP]);
2689
2690 /*
2691 * The emulator might NOT want to set the TSC, because doing
2692 * so would destroy TSC MP-synchronization across CPUs. Try
2693 * to figure out what the emulator meant to do.
2694 *
2695 * If writing the last TSC value we reported via getstate or
2696 * a zero value, assume that the emulator does not want to
2697 * write to the TSC.
2698 */
2699 if (state->msrs[NVMM_X64_MSR_TSC] != cpudata->gtsc_match &&
2700 state->msrs[NVMM_X64_MSR_TSC] != 0) {
2701 cpudata->gtsc_offset =
2702 state->msrs[NVMM_X64_MSR_TSC] - rdtsc();
2703 cpudata->gtsc_want_update = true;
2704 }
2705
2706 /* ENTRY_CTLS_LONG_MODE must match EFER_LMA. */
2707 ctls1 = vmx_vmread(VMCS_ENTRY_CTLS);
2708 if (state->msrs[NVMM_X64_MSR_EFER] & EFER_LMA) {
2709 ctls1 |= ENTRY_CTLS_LONG_MODE;
2710 } else {
2711 ctls1 &= ~ENTRY_CTLS_LONG_MODE;
2712 }
2713 vmx_vmwrite(VMCS_ENTRY_CTLS, ctls1);
2714 }
2715
2716 if (flags & NVMM_X64_STATE_INTR) {
2717 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2718 intstate &= ~(INT_STATE_STI|INT_STATE_MOVSS);
2719 if (state->intr.int_shadow) {
2720 intstate |= INT_STATE_MOVSS;
2721 }
2722 vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY, intstate);
2723
2724 if (state->intr.int_window_exiting) {
2725 vmx_event_waitexit_enable(vcpu, false);
2726 } else {
2727 vmx_event_waitexit_disable(vcpu, false);
2728 }
2729
2730 if (state->intr.nmi_window_exiting) {
2731 vmx_event_waitexit_enable(vcpu, true);
2732 } else {
2733 vmx_event_waitexit_disable(vcpu, true);
2734 }
2735 }
2736
2737 CTASSERT(sizeof(cpudata->gxsave.fpu) == sizeof(state->fpu));
2738 if (flags & NVMM_X64_STATE_FPU) {
2739 memcpy(&cpudata->gxsave.fpu, &state->fpu, sizeof(state->fpu));
2740
2741 fpustate = (struct nvmm_x64_state_fpu *)&cpudata->gxsave.fpu;
2742 fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
2743 fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
2744
2745 if (vmx_xcr0_mask != 0) {
2746 /* Reset XSTATE_BV, to force a reload. */
2747 cpudata->gxsave.xstate_bv = vmx_xcr0_mask;
2748 }
2749 }
2750
2751 vmx_vmcs_leave(vcpu);
2752
2753 comm->state_wanted = 0;
2754 comm->state_cached |= flags;
2755 }
2756
2757 static void
2758 vmx_vcpu_getstate(struct nvmm_cpu *vcpu)
2759 {
2760 struct nvmm_comm_page *comm = vcpu->comm;
2761 struct nvmm_x64_state *state = &comm->state;
2762 struct vmx_cpudata *cpudata = vcpu->cpudata;
2763 struct msr_entry *gmsr = cpudata->gmsr;
2764 uint64_t intstate, flags;
2765
2766 flags = comm->state_wanted;
2767
2768 vmx_vmcs_enter(vcpu);
2769
2770 if (flags & NVMM_X64_STATE_SEGS) {
2771 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_CS);
2772 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_DS);
2773 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_ES);
2774 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_FS);
2775 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GS);
2776 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_SS);
2777 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_GDT);
2778 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_IDT);
2779 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_LDT);
2780 vmx_vcpu_getstate_seg(state->segs, NVMM_X64_SEG_TR);
2781 }
2782
2783 CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
2784 if (flags & NVMM_X64_STATE_GPRS) {
2785 memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
2786
2787 state->gprs[NVMM_X64_GPR_RIP] = vmx_vmread(VMCS_GUEST_RIP);
2788 state->gprs[NVMM_X64_GPR_RSP] = vmx_vmread(VMCS_GUEST_RSP);
2789 state->gprs[NVMM_X64_GPR_RFLAGS] = vmx_vmread(VMCS_GUEST_RFLAGS);
2790 }
2791
2792 if (flags & NVMM_X64_STATE_CRS) {
2793 state->crs[NVMM_X64_CR_CR0] =
2794 (vmx_vmread(VMCS_CR0_SHADOW) & CR0_STATIC_MASK) |
2795 (vmx_vmread(VMCS_GUEST_CR0) & ~CR0_STATIC_MASK);
2796 state->crs[NVMM_X64_CR_CR2] = cpudata->gcr2;
2797 state->crs[NVMM_X64_CR_CR3] = vmx_vmread(VMCS_GUEST_CR3);
2798 state->crs[NVMM_X64_CR_CR4] = vmx_vmread(VMCS_GUEST_CR4);
2799 state->crs[NVMM_X64_CR_CR8] = cpudata->gcr8;
2800 state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
2801
2802 /* Hide VMXE. */
2803 state->crs[NVMM_X64_CR_CR4] &= ~CR4_VMXE;
2804 }
2805
2806 CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
2807 if (flags & NVMM_X64_STATE_DRS) {
2808 memcpy(state->drs, cpudata->drs, sizeof(state->drs));
2809
2810 state->drs[NVMM_X64_DR_DR7] = vmx_vmread(VMCS_GUEST_DR7);
2811 }
2812
2813 if (flags & NVMM_X64_STATE_MSRS) {
2814 state->msrs[NVMM_X64_MSR_STAR] =
2815 gmsr[VMX_MSRLIST_STAR].val;
2816 state->msrs[NVMM_X64_MSR_LSTAR] =
2817 gmsr[VMX_MSRLIST_LSTAR].val;
2818 state->msrs[NVMM_X64_MSR_CSTAR] =
2819 gmsr[VMX_MSRLIST_CSTAR].val;
2820 state->msrs[NVMM_X64_MSR_SFMASK] =
2821 gmsr[VMX_MSRLIST_SFMASK].val;
2822 state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
2823 gmsr[VMX_MSRLIST_KERNELGSBASE].val;
2824 state->msrs[NVMM_X64_MSR_EFER] =
2825 vmx_vmread(VMCS_GUEST_IA32_EFER);
2826 state->msrs[NVMM_X64_MSR_PAT] =
2827 vmx_vmread(VMCS_GUEST_IA32_PAT);
2828 state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
2829 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_CS);
2830 state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
2831 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_ESP);
2832 state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
2833 vmx_vmread(VMCS_GUEST_IA32_SYSENTER_EIP);
2834 state->msrs[NVMM_X64_MSR_TSC] = rdtsc() + cpudata->gtsc_offset;
2835
2836 /* Save reported TSC value for later setstate check. */
2837 cpudata->gtsc_match = state->msrs[NVMM_X64_MSR_TSC];
2838 }
2839
2840 if (flags & NVMM_X64_STATE_INTR) {
2841 intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
2842 state->intr.int_shadow =
2843 (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
2844 state->intr.int_window_exiting = cpudata->int_window_exit;
2845 state->intr.nmi_window_exiting = cpudata->nmi_window_exit;
2846 state->intr.evt_pending = cpudata->evt_pending;
2847 }
2848
2849 CTASSERT(sizeof(cpudata->gxsave.fpu) == sizeof(state->fpu));
2850 if (flags & NVMM_X64_STATE_FPU) {
2851 memcpy(&state->fpu, &cpudata->gxsave.fpu, sizeof(state->fpu));
2852 }
2853
2854 vmx_vmcs_leave(vcpu);
2855
2856 comm->state_wanted = 0;
2857 comm->state_cached |= flags;
2858 }
2859
2860 static void
2861 vmx_vcpu_state_provide(struct nvmm_cpu *vcpu, uint64_t flags)
2862 {
2863 vcpu->comm->state_wanted = flags;
2864 vmx_vcpu_getstate(vcpu);
2865 }
2866
2867 static void
2868 vmx_vcpu_state_commit(struct nvmm_cpu *vcpu)
2869 {
2870 vcpu->comm->state_wanted = vcpu->comm->state_commit;
2871 vcpu->comm->state_commit = 0;
2872 vmx_vcpu_setstate(vcpu);
2873 }
2874
2875 /* -------------------------------------------------------------------------- */
2876
2877 static void
2878 vmx_asid_alloc(struct nvmm_cpu *vcpu)
2879 {
2880 struct vmx_cpudata *cpudata = vcpu->cpudata;
2881 size_t i, oct, bit;
2882
2883 os_mtx_lock(&vmx_asidlock);
2884
2885 for (i = 0; i < vmx_maxasid; i++) {
2886 oct = i / 8;
2887 bit = i % 8;
2888
2889 if (vmx_asidmap[oct] & __BIT(bit)) {
2890 continue;
2891 }
2892
2893 cpudata->asid = i;
2894
2895 vmx_asidmap[oct] |= __BIT(bit);
2896 vmx_vmwrite(VMCS_VPID, i);
2897 os_mtx_unlock(&vmx_asidlock);
2898 return;
2899 }
2900
2901 os_mtx_unlock(&vmx_asidlock);
2902
2903 panic("%s: impossible", __func__);
2904 }
2905
2906 static void
2907 vmx_asid_free(struct nvmm_cpu *vcpu)
2908 {
2909 size_t oct, bit;
2910 uint64_t asid;
2911
2912 asid = vmx_vmread(VMCS_VPID);
2913
2914 oct = asid / 8;
2915 bit = asid % 8;
2916
2917 os_mtx_lock(&vmx_asidlock);
2918 vmx_asidmap[oct] &= ~__BIT(bit);
2919 os_mtx_unlock(&vmx_asidlock);
2920 }
2921
2922 static void
2923 vmx_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
2924 {
2925 struct vmx_cpudata *cpudata = vcpu->cpudata;
2926 struct vmcs *vmcs = cpudata->vmcs;
2927 struct msr_entry *gmsr = cpudata->gmsr;
2928 uint64_t rev, eptp;
2929
2930 rev = vmx_get_revision();
2931
2932 memset(vmcs, 0, VMCS_SIZE);
2933 vmcs->ident = __SHIFTIN(rev, VMCS_IDENT_REVISION);
2934 vmcs->abort = 0;
2935
2936 vmx_vmcs_enter(vcpu);
2937
2938 /* No link pointer. */
2939 vmx_vmwrite(VMCS_LINK_POINTER, 0xFFFFFFFFFFFFFFFF);
2940
2941 /* Install the CTLSs. */
2942 vmx_vmwrite(VMCS_PINBASED_CTLS, vmx_pinbased_ctls);
2943 vmx_vmwrite(VMCS_PROCBASED_CTLS, vmx_procbased_ctls);
2944 vmx_vmwrite(VMCS_PROCBASED_CTLS2, vmx_procbased_ctls2);
2945 vmx_vmwrite(VMCS_ENTRY_CTLS, vmx_entry_ctls);
2946 vmx_vmwrite(VMCS_EXIT_CTLS, vmx_exit_ctls);
2947
2948 /* Allow direct access to certain MSRs. */
2949 memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
2950 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, true);
2951 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
2952 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
2953 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
2954 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
2955 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
2956 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
2957 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
2958 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
2959 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
2960 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
2961 vmx_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
2962 vmx_vmwrite(VMCS_MSR_BITMAP, (uint64_t)cpudata->msrbm_pa);
2963
2964 /*
2965 * List of Guest MSRs loaded on VMENTRY, saved on VMEXIT. This
2966 * includes the L1D_FLUSH MSR, to mitigate L1TF.
2967 */
2968 gmsr[VMX_MSRLIST_STAR].msr = MSR_STAR;
2969 gmsr[VMX_MSRLIST_STAR].val = 0;
2970 gmsr[VMX_MSRLIST_LSTAR].msr = MSR_LSTAR;
2971 gmsr[VMX_MSRLIST_LSTAR].val = 0;
2972 gmsr[VMX_MSRLIST_CSTAR].msr = MSR_CSTAR;
2973 gmsr[VMX_MSRLIST_CSTAR].val = 0;
2974 gmsr[VMX_MSRLIST_SFMASK].msr = MSR_SFMASK;
2975 gmsr[VMX_MSRLIST_SFMASK].val = 0;
2976 gmsr[VMX_MSRLIST_KERNELGSBASE].msr = MSR_KERNELGSBASE;
2977 gmsr[VMX_MSRLIST_KERNELGSBASE].val = 0;
2978 gmsr[VMX_MSRLIST_L1DFLUSH].msr = MSR_IA32_FLUSH_CMD;
2979 gmsr[VMX_MSRLIST_L1DFLUSH].val = IA32_FLUSH_CMD_L1D_FLUSH;
2980 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_ADDRESS, cpudata->gmsr_pa);
2981 vmx_vmwrite(VMCS_EXIT_MSR_STORE_ADDRESS, cpudata->gmsr_pa);
2982 vmx_vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, vmx_msrlist_entry_nmsr);
2983 vmx_vmwrite(VMCS_EXIT_MSR_STORE_COUNT, VMX_MSRLIST_EXIT_NMSR);
2984
2985 /* Set the CR0 mask. Any change of these bits causes a VMEXIT. */
2986 vmx_vmwrite(VMCS_CR0_MASK, CR0_STATIC_MASK);
2987
2988 /* Force unsupported CR4 fields to zero. */
2989 vmx_vmwrite(VMCS_CR4_MASK, CR4_INVALID);
2990 vmx_vmwrite(VMCS_CR4_SHADOW, 0);
2991
2992 /* Set the Host state for resuming. */
2993 vmx_vmwrite(VMCS_HOST_RIP, (uint64_t)(uintptr_t)vmx_resume_rip);
2994 vmx_vmwrite(VMCS_HOST_CS_SELECTOR, GSEL(GCODE_SEL, SEL_KPL));
2995 vmx_vmwrite(VMCS_HOST_SS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2996 vmx_vmwrite(VMCS_HOST_DS_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2997 vmx_vmwrite(VMCS_HOST_ES_SELECTOR, GSEL(GDATA_SEL, SEL_KPL));
2998 vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0);
2999 vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0);
3000 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_CS, 0);
3001 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_ESP, 0);
3002 vmx_vmwrite(VMCS_HOST_IA32_SYSENTER_EIP, 0);
3003 vmx_vmwrite(VMCS_HOST_IA32_PAT, rdmsr(MSR_CR_PAT));
3004 vmx_vmwrite(VMCS_HOST_IA32_EFER, rdmsr(MSR_EFER));
3005 vmx_vmwrite(VMCS_HOST_CR0, x86_get_cr0() & ~CR0_TS);
3006
3007 /* Generate ASID. */
3008 vmx_asid_alloc(vcpu);
3009
3010 /* Enable Extended Paging, 4-Level. */
3011 eptp =
3012 __SHIFTIN(vmx_eptp_type, EPTP_TYPE) |
3013 __SHIFTIN(4-1, EPTP_WALKLEN) |
3014 (vmx_ept_has_ad ? EPTP_FLAGS_AD : 0) |
3015 os_vmspace_pdirpa(mach->vm);
3016 vmx_vmwrite(VMCS_EPTP, eptp);
3017
3018 /* Init IA32_MISC_ENABLE. */
3019 cpudata->gmsr_misc_enable = rdmsr(MSR_MISC_ENABLE);
3020 cpudata->gmsr_misc_enable &=
3021 ~(IA32_MISC_PERFMON_EN|IA32_MISC_EISST_EN|IA32_MISC_MWAIT_EN);
3022 cpudata->gmsr_misc_enable |=
3023 (IA32_MISC_BTS_UNAVAIL|IA32_MISC_PEBS_UNAVAIL);
3024
3025 /* Init XSAVE header. */
3026 cpudata->gxsave.xstate_bv = vmx_xcr0_mask;
3027 cpudata->gxsave.xcomp_bv = 0;
3028
3029 /* Install the RESET state. */
3030 memcpy(&vcpu->comm->state, &nvmm_x86_reset_state,
3031 sizeof(nvmm_x86_reset_state));
3032 vcpu->comm->state_wanted = NVMM_X64_STATE_ALL;
3033 vcpu->comm->state_cached = 0;
3034 vmx_vcpu_setstate(vcpu);
3035
3036 vmx_vmcs_leave(vcpu);
3037 }
3038
3039 static int
3040 vmx_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3041 {
3042 struct vmx_cpudata *cpudata;
3043 int error;
3044
3045 /* Allocate the VMX cpudata. */
3046 cpudata = (struct vmx_cpudata *)os_pagemem_zalloc(sizeof(*cpudata));
3047 if (cpudata == NULL)
3048 return ENOMEM;
3049
3050 vcpu->cpudata = cpudata;
3051
3052 /* VMCS */
3053 error = os_contigpa_zalloc(&cpudata->vmcs_pa,
3054 (vaddr_t *)&cpudata->vmcs, VMCS_NPAGES);
3055 if (error)
3056 goto error;
3057
3058 /* MSR Bitmap */
3059 error = os_contigpa_zalloc(&cpudata->msrbm_pa,
3060 (vaddr_t *)&cpudata->msrbm, MSRBM_NPAGES);
3061 if (error)
3062 goto error;
3063
3064 /* Guest MSR List */
3065 error = os_contigpa_zalloc(&cpudata->gmsr_pa,
3066 (vaddr_t *)&cpudata->gmsr, 1);
3067 if (error)
3068 goto error;
3069
3070 os_cpuset_init(&cpudata->htlb_want_flush);
3071
3072 /* Init the VCPU info. */
3073 vmx_vcpu_init(mach, vcpu);
3074
3075 return 0;
3076
3077 error:
3078 if (cpudata->vmcs_pa) {
3079 os_contigpa_free(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
3080 VMCS_NPAGES);
3081 }
3082 if (cpudata->msrbm_pa) {
3083 os_contigpa_free(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
3084 MSRBM_NPAGES);
3085 }
3086 if (cpudata->gmsr_pa) {
3087 os_contigpa_free(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr, 1);
3088 }
3089 os_pagemem_free(cpudata, sizeof(*cpudata));
3090 return error;
3091 }
3092
3093 static void
3094 vmx_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
3095 {
3096 struct vmx_cpudata *cpudata = vcpu->cpudata;
3097
3098 vmx_vmcs_enter(vcpu);
3099 vmx_asid_free(vcpu);
3100 vmx_vmcs_destroy(vcpu);
3101
3102 os_cpuset_destroy(cpudata->htlb_want_flush);
3103
3104 os_contigpa_free(cpudata->vmcs_pa, (vaddr_t)cpudata->vmcs,
3105 VMCS_NPAGES);
3106 os_contigpa_free(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
3107 MSRBM_NPAGES);
3108 os_contigpa_free(cpudata->gmsr_pa, (vaddr_t)cpudata->gmsr,
3109 1);
3110 os_pagemem_free(cpudata, sizeof(*cpudata));
3111 }
3112
3113 /* -------------------------------------------------------------------------- */
3114
3115 static int
3116 vmx_vcpu_configure_cpuid(struct vmx_cpudata *cpudata, void *data)
3117 {
3118 struct nvmm_vcpu_conf_cpuid *cpuid = data;
3119 size_t i;
3120
3121 if (__predict_false(cpuid->mask && cpuid->exit)) {
3122 return EINVAL;
3123 }
3124 if (__predict_false(cpuid->mask &&
3125 ((cpuid->u.mask.set.eax & cpuid->u.mask.del.eax) ||
3126 (cpuid->u.mask.set.ebx & cpuid->u.mask.del.ebx) ||
3127 (cpuid->u.mask.set.ecx & cpuid->u.mask.del.ecx) ||
3128 (cpuid->u.mask.set.edx & cpuid->u.mask.del.edx)))) {
3129 return EINVAL;
3130 }
3131
3132 /* If unset, delete, to restore the default behavior. */
3133 if (!cpuid->mask && !cpuid->exit) {
3134 for (i = 0; i < VMX_NCPUIDS; i++) {
3135 if (!cpudata->cpuidpresent[i]) {
3136 continue;
3137 }
3138 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3139 cpudata->cpuidpresent[i] = false;
3140 }
3141 }
3142 return 0;
3143 }
3144
3145 /* If already here, replace. */
3146 for (i = 0; i < VMX_NCPUIDS; i++) {
3147 if (!cpudata->cpuidpresent[i]) {
3148 continue;
3149 }
3150 if (cpudata->cpuid[i].leaf == cpuid->leaf) {
3151 memcpy(&cpudata->cpuid[i], cpuid,
3152 sizeof(struct nvmm_vcpu_conf_cpuid));
3153 return 0;
3154 }
3155 }
3156
3157 /* Not here, insert. */
3158 for (i = 0; i < VMX_NCPUIDS; i++) {
3159 if (!cpudata->cpuidpresent[i]) {
3160 cpudata->cpuidpresent[i] = true;
3161 memcpy(&cpudata->cpuid[i], cpuid,
3162 sizeof(struct nvmm_vcpu_conf_cpuid));
3163 return 0;
3164 }
3165 }
3166
3167 return ENOBUFS;
3168 }
3169
3170 static int
3171 vmx_vcpu_configure_tpr(struct vmx_cpudata *cpudata, void *data)
3172 {
3173 struct nvmm_vcpu_conf_tpr *tpr = data;
3174
3175 memcpy(&cpudata->tpr, tpr, sizeof(*tpr));
3176 return 0;
3177 }
3178
3179 static int
3180 vmx_vcpu_configure(struct nvmm_cpu *vcpu, uint64_t op, void *data)
3181 {
3182 struct vmx_cpudata *cpudata = vcpu->cpudata;
3183
3184 switch (op) {
3185 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_CPUID):
3186 return vmx_vcpu_configure_cpuid(cpudata, data);
3187 case NVMM_VCPU_CONF_MD(NVMM_VCPU_CONF_TPR):
3188 return vmx_vcpu_configure_tpr(cpudata, data);
3189 default:
3190 return EINVAL;
3191 }
3192 }
3193
3194 /* -------------------------------------------------------------------------- */
3195
3196 #ifdef __NetBSD__
3197 static void
3198 vmx_tlb_flush(struct pmap *pm)
3199 {
3200 struct nvmm_machine *mach = os_pmap_mach(pm);
3201 struct vmx_machdata *machdata = mach->machdata;
3202
3203 os_atomic_inc_64(&machdata->mach_htlb_gen);
3204
3205 /*
3206 * Send a dummy IPI to each CPU. The IPIs cause #VMEXITs. Afterwards the
3207 * VCPU loops will see that their 'vcpu_htlb_gen' is out of sync, and
3208 * will each flush their own TLB.
3209 */
3210 os_ipi_kickall();
3211 }
3212 #endif
3213
3214 static void
3215 vmx_machine_create(struct nvmm_machine *mach)
3216 {
3217 struct pmap *pmap = os_vmspace_pmap(mach->vm);
3218 struct vmx_machdata *machdata;
3219
3220 /* Transform into an EPT pmap. */
3221 #if defined(__NetBSD__)
3222 pmap_ept_transform(pmap);
3223 os_pmap_mach(pmap) = (void *)mach;
3224 pmap->pm_tlb_flush = vmx_tlb_flush;
3225 #elif defined(__DragonFly__)
3226 pmap_ept_transform(pmap, vmx_ept_has_ad ? 0 : PMAP_EMULATE_AD_BITS);
3227 #endif
3228
3229 machdata = os_mem_zalloc(sizeof(struct vmx_machdata));
3230 mach->machdata = machdata;
3231
3232 /* Start with an hTLB flush everywhere. */
3233 machdata->mach_htlb_gen = 1;
3234 }
3235
3236 static void
3237 vmx_machine_destroy(struct nvmm_machine *mach)
3238 {
3239 os_mem_free(mach->machdata, sizeof(struct vmx_machdata));
3240 }
3241
3242 static int
3243 vmx_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
3244 {
3245 panic("%s: impossible", __func__);
3246 }
3247
3248 /* -------------------------------------------------------------------------- */
3249
3250 #define CTLS_ONE_ALLOWED(msrval, bitoff) \
3251 ((msrval & __BIT(32 + bitoff)) != 0)
3252 #define CTLS_ZERO_ALLOWED(msrval, bitoff) \
3253 ((msrval & __BIT(bitoff)) == 0)
3254
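/*
 * Each VMX capability MSR encodes the "allowed-0" settings in its low
 * 32 bits (a 1 there means the control must be 1) and the "allowed-1"
 * settings in its high 32 bits (a 0 there means the control must be 0).
 * A control is flexible only when both tests above pass.
 */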
3255 static int
3256 vmx_check_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls, uint64_t set_one)
3257 {
3258 uint64_t basic, val, true_val;
3259 bool has_true;
3260 size_t i;
3261
3262 basic = rdmsr(MSR_IA32_VMX_BASIC);
3263 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3264
3265 val = rdmsr(msr_ctls);
3266 if (has_true) {
3267 true_val = rdmsr(msr_true_ctls);
3268 } else {
3269 true_val = val;
3270 }
3271
3272 for (i = 0; i < 32; i++) {
3273 if (!(set_one & __BIT(i))) {
3274 continue;
3275 }
3276 if (!CTLS_ONE_ALLOWED(true_val, i)) {
3277 return -1;
3278 }
3279 }
3280
3281 return 0;
3282 }
3283
3284 static int
3285 vmx_init_ctls(uint64_t msr_ctls, uint64_t msr_true_ctls,
3286 uint64_t set_one, uint64_t set_zero, uint64_t *res)
3287 {
3288 uint64_t basic, val, true_val;
3289 bool one_allowed, zero_allowed, has_true;
3290 size_t i;
3291
3292 basic = rdmsr(MSR_IA32_VMX_BASIC);
3293 has_true = (basic & IA32_VMX_BASIC_TRUE_CTLS) != 0;
3294
3295 val = rdmsr(msr_ctls);
3296 if (has_true) {
3297 true_val = rdmsr(msr_true_ctls);
3298 } else {
3299 true_val = val;
3300 }
3301
3302 for (i = 0; i < 32; i++) {
3303 one_allowed = CTLS_ONE_ALLOWED(true_val, i);
3304 zero_allowed = CTLS_ZERO_ALLOWED(true_val, i);
3305
3306 if (zero_allowed && !one_allowed) {
3307 if (set_one & __BIT(i))
3308 return -1;
3309 *res &= ~__BIT(i);
3310 } else if (one_allowed && !zero_allowed) {
3311 if (set_zero & __BIT(i))
3312 return -1;
3313 *res |= __BIT(i);
3314 } else {
3315 if (set_zero & __BIT(i)) {
3316 *res &= ~__BIT(i);
3317 } else if (set_one & __BIT(i)) {
3318 *res |= __BIT(i);
3319 } else if (!has_true) {
3320 *res &= ~__BIT(i);
3321 } else if (CTLS_ZERO_ALLOWED(val, i)) {
3322 *res &= ~__BIT(i);
3323 } else if (CTLS_ONE_ALLOWED(val, i)) {
3324 *res |= __BIT(i);
3325 } else {
3326 return -1;
3327 }
3328 }
3329 }
3330
3331 return 0;
3332 }
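/*
 * Worked example, for a hypothetical control bit: if the TRUE MSR
 * reports it allowed-1-only, vmx_init_ctls() forces it on, and fails
 * only if the caller listed it in set_zero. If the bit is flexible,
 * the caller's set_one/set_zero preference wins, falling back to the
 * default encoded in the non-TRUE MSR.
 */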
3333
3334 static bool
3335 vmx_ident(void)
3336 {
3337 cpuid_desc_t descs;
3338 uint64_t msr;
3339 int ret;
3340
3341 x86_get_cpuid(0x00000001, &descs);
3342 if (!(descs.ecx & CPUID_0_01_ECX_VMX)) {
3343 return false;
3344 }
3345
3346 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3347 if ((msr & IA32_FEATURE_CONTROL_LOCK) != 0 &&
3348 (msr & IA32_FEATURE_CONTROL_OUT_SMX) == 0) {
3349 os_printf("nvmm: VMX disabled in BIOS\n");
3350 return false;
3351 }
3352
3353 msr = rdmsr(MSR_IA32_VMX_BASIC);
3354 if ((msr & IA32_VMX_BASIC_IO_REPORT) == 0) {
3355 os_printf("nvmm: I/O reporting not supported\n");
3356 return false;
3357 }
3358 if (__SHIFTOUT(msr, IA32_VMX_BASIC_MEM_TYPE) != MEM_TYPE_WB) {
3359 os_printf("nvmm: WB memory not supported\n");
3360 return false;
3361 }
3362
3363 /* PG and PE are reported, even if Unrestricted Guests is supported. */
3364 vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0) & ~(CR0_PG|CR0_PE);
3365 vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1) | (CR0_PG|CR0_PE);
3366 ret = vmx_check_cr(x86_get_cr0(), vmx_cr0_fixed0, vmx_cr0_fixed1);
3367 if (ret == -1) {
3368 os_printf("nvmm: CR0 requirements not satisfied\n");
3369 return false;
3370 }
3371
3372 vmx_cr4_fixed0 = rdmsr(MSR_IA32_VMX_CR4_FIXED0);
3373 vmx_cr4_fixed1 = rdmsr(MSR_IA32_VMX_CR4_FIXED1);
3374 ret = vmx_check_cr(x86_get_cr4() | CR4_VMXE, vmx_cr4_fixed0,
3375 vmx_cr4_fixed1);
3376 if (ret == -1) {
3377 os_printf("nvmm: CR4 requirements not satisfied\n");
3378 return false;
3379 }
3380
3381 /* Init the CTLSs right now, and check for errors. */
3382 ret = vmx_init_ctls(
3383 MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3384 VMX_PINBASED_CTLS_ONE, VMX_PINBASED_CTLS_ZERO,
3385 &vmx_pinbased_ctls);
3386 if (ret == -1) {
3387 os_printf("nvmm: pin-based-ctls requirements not satisfied\n");
3388 return false;
3389 }
3390 ret = vmx_init_ctls(
3391 MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3392 VMX_PROCBASED_CTLS_ONE, VMX_PROCBASED_CTLS_ZERO,
3393 &vmx_procbased_ctls);
3394 if (ret == -1) {
3395 os_printf("nvmm: proc-based-ctls requirements not satisfied\n");
3396 return false;
3397 }
3398 ret = vmx_init_ctls(
3399 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3400 VMX_PROCBASED_CTLS2_ONE, VMX_PROCBASED_CTLS2_ZERO,
3401 &vmx_procbased_ctls2);
3402 if (ret == -1) {
3403 os_printf("nvmm: proc-based-ctls2 requirements not satisfied\n");
3404 return false;
3405 }
3406 ret = vmx_check_ctls(
3407 MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_PROCBASED_CTLS2,
3408 PROC_CTLS2_INVPCID_ENABLE);
3409 if (ret != -1) {
3410 vmx_procbased_ctls2 |= PROC_CTLS2_INVPCID_ENABLE;
3411 }
3412 ret = vmx_init_ctls(
3413 MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3414 VMX_ENTRY_CTLS_ONE, VMX_ENTRY_CTLS_ZERO,
3415 &vmx_entry_ctls);
3416 if (ret == -1) {
3417 os_printf("nvmm: entry-ctls requirements not satisfied\n");
3418 return false;
3419 }
3420 ret = vmx_init_ctls(
3421 MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3422 VMX_EXIT_CTLS_ONE, VMX_EXIT_CTLS_ZERO,
3423 &vmx_exit_ctls);
3424 if (ret == -1) {
3425 os_printf("nvmm: exit-ctls requirements not satisfied\n");
3426 return false;
3427 }
3428
3429 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3430 if ((msr & IA32_VMX_EPT_VPID_WALKLENGTH_4) == 0) {
3431 os_printf("nvmm: 4-level page tree not supported\n");
3432 return false;
3433 }
3434 if ((msr & IA32_VMX_EPT_VPID_INVEPT) == 0) {
3435 os_printf("nvmm: INVEPT not supported\n");
3436 return false;
3437 }
3438 if ((msr & IA32_VMX_EPT_VPID_INVVPID) == 0) {
3439 os_printf("nvmm: INVVPID not supported\n");
3440 return false;
3441 }
3442 if ((msr & IA32_VMX_EPT_VPID_FLAGS_AD) != 0) {
3443 vmx_ept_has_ad = true;
3444 } else {
3445 vmx_ept_has_ad = false;
3446 }
3447 #ifdef __NetBSD__
3448 pmap_ept_has_ad = vmx_ept_has_ad;
3449 #endif
3450 if (!(msr & IA32_VMX_EPT_VPID_UC) && !(msr & IA32_VMX_EPT_VPID_WB)) {
3451 os_printf("nvmm: EPT UC/WB memory types not supported\n");
3452 return false;
3453 }
3454
3455 return true;
3456 }
3457
3458 static void
3459 vmx_init_asid(uint32_t maxasid)
3460 {
3461 size_t allocsz;
3462
3463 os_mtx_init(&vmx_asidlock);
3464
3465 vmx_maxasid = maxasid;
3466 allocsz = roundup(maxasid, 8) / 8;
3467 vmx_asidmap = os_mem_zalloc(allocsz);
3468
3469 /* ASID 0 is reserved for the host. */
3470 vmx_asidmap[0] |= __BIT(0);
3471 }
3472
3473 static
3474 OS_IPI_FUNC(vmx_change_cpu)
3475 {
3476 bool enable = arg != NULL;
3477 uint64_t msr, cr4;
3478
3479 if (enable) {
3480 msr = rdmsr(MSR_IA32_FEATURE_CONTROL);
3481 if ((msr & IA32_FEATURE_CONTROL_LOCK) == 0) {
3482 /* Lock now, with VMX-outside-SMX enabled. */
3483 wrmsr(MSR_IA32_FEATURE_CONTROL, msr |
3484 IA32_FEATURE_CONTROL_LOCK |
3485 IA32_FEATURE_CONTROL_OUT_SMX);
3486 }
3487 }
3488
3489 if (!enable) {
3490 vmx_vmxoff();
3491 }
3492
3493 cr4 = x86_get_cr4();
3494 if (enable) {
3495 cr4 |= CR4_VMXE;
3496 } else {
3497 cr4 &= ~CR4_VMXE;
3498 }
3499 x86_set_cr4(cr4);
3500
3501 if (enable) {
3502 vmx_vmxon(&vmxoncpu[os_curcpu_number()].pa);
3503 }
3504 }
3505
3506 static void
3507 vmx_init_l1tf(void)
3508 {
3509 cpuid_desc_t descs;
3510 uint64_t msr;
3511
3512 x86_get_cpuid(0x00000000, &descs);
3513 if (descs.eax < 7) {
3514 return;
3515 }
3516
3517 x86_get_cpuid(0x00000007, &descs);
3518
3519 if (descs.edx & CPUID_0_07_EDX_ARCH_CAP) {
3520 msr = rdmsr(MSR_IA32_ARCH_CAPABILITIES);
3521 if (msr & IA32_ARCH_SKIP_L1DFL_VMENTRY) {
3522 /* No mitigation needed. */
3523 return;
3524 }
3525 }
3526
3527 if (descs.edx & CPUID_0_07_EDX_L1D_FLUSH) {
3528 /* Enable hardware mitigation. */
3529 vmx_msrlist_entry_nmsr += 1;
3530 }
3531 }
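/*
 * When the hardware mitigation is enabled above, the extra slot in the
 * guest MSR load list (VMX_MSRLIST_L1DFLUSH, set up in vmx_vcpu_init())
 * makes each VM entry write IA32_FLUSH_CMD, flushing the L1D cache
 * before guest code runs.
 */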
3532
3533 static void
3534 vmx_init(void)
3535 {
3536 struct vmxon *vmxon;
3537 uint32_t revision;
3538 cpuid_desc_t descs;
3539 os_cpu_t *cpu;
3540 uint64_t msr;
3541 paddr_t pa;
3542 vaddr_t va;
3543 int error;
3544
3545 /* Init the ASID bitmap (VPID). */
3546 vmx_init_asid(VPID_MAX);
3547
3548 /* Init the XCR0 mask. */
3549 vmx_xcr0_mask = VMX_XCR0_MASK_DEFAULT & x86_xsave_features;
3550
3551 /* Init the max basic CPUID leaf. */
3552 x86_get_cpuid(0x00000000, &descs);
3553 vmx_cpuid_max_basic = uimin(descs.eax, VMX_CPUID_MAX_BASIC);
3554
3555 /* Init the max extended CPUID leaf. */
3556 x86_get_cpuid(0x80000000, &descs);
3557 vmx_cpuid_max_extended = uimin(descs.eax, VMX_CPUID_MAX_EXTENDED);
3558
3559 /* Init the TLB flush op, the EPT flush op and the EPTP type. */
3560 msr = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP);
3561 if ((msr & IA32_VMX_EPT_VPID_INVVPID_CONTEXT) != 0) {
3562 vmx_tlb_flush_op = VMX_INVVPID_CONTEXT;
3563 } else {
3564 vmx_tlb_flush_op = VMX_INVVPID_ALL;
3565 }
3566 if ((msr & IA32_VMX_EPT_VPID_INVEPT_CONTEXT) != 0) {
3567 vmx_ept_flush_op = VMX_INVEPT_CONTEXT;
3568 } else {
3569 vmx_ept_flush_op = VMX_INVEPT_ALL;
3570 }
3571 if ((msr & IA32_VMX_EPT_VPID_WB) != 0) {
3572 vmx_eptp_type = EPTP_TYPE_WB;
3573 } else {
3574 vmx_eptp_type = EPTP_TYPE_UC;
3575 }
3576
3577 /* Init the L1TF mitigation. */
3578 vmx_init_l1tf();
3579
3580 /* Init the global host state. */
3581 if (vmx_xcr0_mask != 0) {
3582 vmx_global_hstate.xcr0 = x86_get_xcr(0);
3583 }
3584 vmx_global_hstate.star = rdmsr(MSR_STAR);
3585 vmx_global_hstate.lstar = rdmsr(MSR_LSTAR);
3586 vmx_global_hstate.cstar = rdmsr(MSR_CSTAR);
3587 vmx_global_hstate.sfmask = rdmsr(MSR_SFMASK);
3588
3589 memset(vmxoncpu, 0, sizeof(vmxoncpu));
3590 revision = vmx_get_revision();
3591
3592 OS_CPU_FOREACH(cpu) {
3593 error = os_contigpa_zalloc(&pa, &va, 1);
3594 if (error) {
3595 panic("%s: out of memory", __func__);
3596 }
3597 vmxoncpu[os_cpu_number(cpu)].pa = pa;
3598 vmxoncpu[os_cpu_number(cpu)].va = va;
3599
3600 vmxon = (struct vmxon *)vmxoncpu[os_cpu_number(cpu)].va;
3601 vmxon->ident = __SHIFTIN(revision, VMXON_IDENT_REVISION);
3602 }
3603
3604 os_ipi_broadcast(vmx_change_cpu, (void *)true);
3605 }
3606
3607 static void
3608 vmx_fini_asid(void)
3609 {
3610 size_t allocsz;
3611
3612 allocsz = roundup(vmx_maxasid, 8) / 8;
3613 os_mem_free(vmx_asidmap, allocsz);
3614
3615 os_mtx_destroy(&vmx_asidlock);
3616 }
3617
3618 static void
3619 vmx_fini(void)
3620 {
3621 size_t i;
3622
3623 os_ipi_broadcast(vmx_change_cpu, (void *)false);
3624
3625 for (i = 0; i < OS_MAXCPUS; i++) {
3626 if (vmxoncpu[i].pa != 0)
3627 os_contigpa_free(vmxoncpu[i].pa, vmxoncpu[i].va, 1);
3628 }
3629
3630 vmx_fini_asid();
3631 }
3632
3633 static void
3634 vmx_capability(struct nvmm_capability *cap)
3635 {
3636 cap->arch.mach_conf_support = 0;
3637 cap->arch.vcpu_conf_support =
3638 NVMM_CAP_ARCH_VCPU_CONF_CPUID |
3639 NVMM_CAP_ARCH_VCPU_CONF_TPR;
3640 cap->arch.xcr0_mask = vmx_xcr0_mask;
3641 cap->arch.mxcsr_mask = x86_fpu_mxcsr_mask;
3642 cap->arch.conf_cpuid_maxops = VMX_NCPUIDS;
3643 }
3644
3645 const struct nvmm_impl nvmm_x86_vmx = {
3646 .name = "x86-vmx",
3647 .ident = vmx_ident,
3648 .init = vmx_init,
3649 .fini = vmx_fini,
3650 .capability = vmx_capability,
3651 .mach_conf_max = NVMM_X86_MACH_NCONF,
3652 .mach_conf_sizes = NULL,
3653 .vcpu_conf_max = NVMM_X86_VCPU_NCONF,
3654 .vcpu_conf_sizes = vmx_vcpu_conf_sizes,
3655 .state_size = sizeof(struct nvmm_x64_state),
3656 .machine_create = vmx_machine_create,
3657 .machine_destroy = vmx_machine_destroy,
3658 .machine_configure = vmx_machine_configure,
3659 .vcpu_create = vmx_vcpu_create,
3660 .vcpu_destroy = vmx_vcpu_destroy,
3661 .vcpu_configure = vmx_vcpu_configure,
3662 .vcpu_setstate = vmx_vcpu_setstate,
3663 .vcpu_getstate = vmx_vcpu_getstate,
3664 .vcpu_inject = vmx_vcpu_inject,
3665 .vcpu_run = vmx_vcpu_run
3666 };
3667