/*
 * Copyright (c) 2018-2021 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <unistd.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <machine/segments.h>
#include <machine/psl.h>

#include <nvmm.h>

#ifdef __NetBSD__

#include <machine/pte.h>
#define PAGE_SIZE 4096

#else /* DragonFly */

#include <machine/pmap.h>
#define PTE_P		X86_PG_V	/* 0x001: P (Valid) */
#define PTE_W		X86_PG_RW	/* 0x002: R/W (Read/Write) */
#define PSL_MBO		PSL_RESERVED_DEFAULT	/* 0x00000002 */
#define SDT_SYS386BSY	SDT_SYSBSY	/* 11: system 64-bit TSS busy */

#endif /* __NetBSD__ */

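/*
 * mmiobuf is the host-side backing store for the guest page at GPA 0x1000,
 * which is intentionally left unmapped so that every guest access to it
 * triggers a memory exit. instbuf is the host mapping of the guest page at
 * GPA 0x2000, into which each test snippet is copied before being run.
 */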
static uint8_t mmiobuf[PAGE_SIZE];
static uint8_t *instbuf;

/* -------------------------------------------------------------------------- */

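/*
 * MMIO emulation callback, invoked by nvmm_assist_mem() once the faulting
 * instruction has been decoded. The access must fall within the emulated
 * page at GPA 0x1000; the data is then copied to or from mmiobuf.
 */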
static void
mem_callback(struct nvmm_mem *mem)
{
	size_t off;

	if (mem->gpa < 0x1000 || mem->gpa + mem->size > 0x1000 + PAGE_SIZE) {
		printf("Out of page\n");
		exit(-1);
	}

	off = mem->gpa - 0x1000;

	printf("-> gpa = %p\n", (void *)mem->gpa);

	if (mem->write) {
		memcpy(mmiobuf + off, mem->data, mem->size);
	} else {
		memcpy(mem->data, mmiobuf + off, mem->size);
	}
}

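/*
 * Have libnvmm decode and emulate the instruction that caused the memory
 * exit; the actual data transfer is performed by mem_callback above.
 */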
static int
handle_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
	int ret;

	ret = nvmm_assist_mem(mach, vcpu);
	if (ret == -1) {
		err(errno, "nvmm_assist_mem");
	}

	return 0;
}

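/*
 * Run the vCPU until it signals completion. The test snippets terminate with
 * an RDMSR, whose exit is used as the stop marker below; memory exits are
 * forwarded to the assist logic above.
 */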
static void
run_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
	struct nvmm_vcpu_exit *exit = vcpu->exit;

	while (1) {
		if (nvmm_vcpu_run(mach, vcpu) == -1)
			err(errno, "nvmm_vcpu_run");

		switch (exit->reason) {
		case NVMM_VCPU_EXIT_NONE:
			break;

		case NVMM_VCPU_EXIT_RDMSR:
			/* Stop here. */
			return;

		case NVMM_VCPU_EXIT_MEMORY:
			handle_memory(mach, vcpu);
			break;

		case NVMM_VCPU_EXIT_SHUTDOWN:
			printf("Shutting down!\n");
			return;

		default:
			printf("Invalid VMEXIT: 0x%lx\n", exit->reason);
			return;
		}
	}
}

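/* Only the memory assist is exercised by this test; port I/O is not used. */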
static struct nvmm_assist_callbacks callbacks = {
	.io = NULL,
	.mem = mem_callback
};

/* -------------------------------------------------------------------------- */

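/*
 * One test case: [code_begin, code_end) is the guest snippet to copy into
 * instbuf, and 'wanted' is the value expected at offset 'off' within mmiobuf
 * once the snippet has run.
 */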
struct test {
	const char *name;
	uint8_t *code_begin;
	uint8_t *code_end;
	uint64_t wanted;
	uint64_t off;
};

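/*
 * Copy the snippet into the guest's instruction page, run the machine, and
 * compare the 64-bit value the snippet deposited in the MMIO page against
 * the expected result.
 */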
static void
run_test(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
    const struct test *test)
{
	uint64_t *res;
	size_t size;

	size = (size_t)(test->code_end - test->code_begin);

	memset(mmiobuf, 0, PAGE_SIZE);
	memcpy(instbuf, test->code_begin, size);

	run_machine(mach, vcpu);

	res = (uint64_t *)(mmiobuf + test->off);
	if (*res == test->wanted) {
		printf("Test '%s' passed\n", test->name);
	} else {
		printf("Test '%s' failed, wanted 0x%lx, got 0x%lx\n", test->name,
		    test->wanted, *res);
		errx(-1, "run_test failed");
	}
}

/* -------------------------------------------------------------------------- */

extern uint8_t test1_begin, test1_end;
extern uint8_t test2_begin, test2_end;
extern uint8_t test3_begin, test3_end;
extern uint8_t test4_begin, test4_end;
extern uint8_t test5_begin, test5_end;
extern uint8_t test6_begin, test6_end;
extern uint8_t test7_begin, test7_end;
extern uint8_t test8_begin, test8_end;
extern uint8_t test9_begin, test9_end;
extern uint8_t test10_begin, test10_end;
extern uint8_t test11_begin, test11_end;
extern uint8_t test12_begin, test12_end;
extern uint8_t test13_begin, test13_end;
extern uint8_t test14_begin, test14_end;
extern uint8_t test_64bit_15_begin, test_64bit_15_end;
extern uint8_t test_64bit_16_begin, test_64bit_16_end;

static const struct test tests64[] = {
	{ "64bit test1 - MOV", &test1_begin, &test1_end, 0x3004, 0 },
	{ "64bit test2 - OR",  &test2_begin, &test2_end, 0x16FF, 0 },
	{ "64bit test3 - AND", &test3_begin, &test3_end, 0x1FC0, 0 },
	{ "64bit test4 - XOR", &test4_begin, &test4_end, 0x10CF, 0 },
	{ "64bit test5 - Address Sizes", &test5_begin, &test5_end, 0x1F00, 0 },
	{ "64bit test6 - DMO", &test6_begin, &test6_end, 0xFFAB, 0 },
	{ "64bit test7 - STOS", &test7_begin, &test7_end, 0x00123456, 0 },
	{ "64bit test8 - LODS", &test8_begin, &test8_end, 0x12345678, 0 },
	{ "64bit test9 - MOVS", &test9_begin, &test9_end, 0x12345678, 0 },
	{ "64bit test10 - MOVZXB", &test10_begin, &test10_end, 0x00000078, 0 },
	{ "64bit test11 - MOVZXW", &test11_begin, &test11_end, 0x00005678, 0 },
	{ "64bit test12 - CMP", &test12_begin, &test12_end, 0x00000001, 0 },
	{ "64bit test13 - SUB", &test13_begin, &test13_end, 0x0000000F0000A0FF, 0 },
	{ "64bit test14 - TEST", &test14_begin, &test14_end, 0x00000001, 0 },
	{ "64bit test15 - XCHG", &test_64bit_15_begin, &test_64bit_15_end, 0x123456, 0 },
	{ "64bit test16 - XCHG", &test_64bit_16_begin, &test_64bit_16_end,
	  0x123456, 0 },
	{ NULL, NULL, NULL, -1, 0 }
};

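/*
 * Fill in a segment with base 0 and the given descriptor type and selector.
 * The S bit is derived from bit 4 of the type, which distinguishes code/data
 * descriptors from system descriptors.
 */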
static void
init_seg(struct nvmm_x64_state_seg *seg, int type, int sel)
{
	seg->selector = sel;
	seg->attrib.type = type;
	seg->attrib.s = (type & 0b10000) != 0;
	seg->attrib.dpl = 0;
	seg->attrib.p = 1;
	seg->attrib.avl = 1;
	seg->attrib.l = 1;
	seg->attrib.def = 0;
	seg->attrib.g = 1;
	seg->limit = 0x0000FFFF;
	seg->base = 0x00000000;
}

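/*
 * Reset the vCPU into 64-bit long mode with paging enabled: flat ring-0
 * segments, CR3 pointing at the L4 table at GPA 0x3000, and RIP at the
 * instruction page (GPA 0x2000).
 */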
static void
reset_machine64(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
	struct nvmm_x64_state *state = vcpu->state;

	if (nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_ALL) == -1)
		err(errno, "nvmm_vcpu_getstate");

	memset(state, 0, sizeof(*state));

	/* Default. */
	state->gprs[NVMM_X64_GPR_RFLAGS] = PSL_MBO;
	init_seg(&state->segs[NVMM_X64_SEG_CS], SDT_MEMERA, GSEL(GCODE_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_SS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_DS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_ES], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_FS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_GS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));

	/* Blank. */
	init_seg(&state->segs[NVMM_X64_SEG_GDT], 0, 0);
	init_seg(&state->segs[NVMM_X64_SEG_IDT], 0, 0);
	init_seg(&state->segs[NVMM_X64_SEG_LDT], SDT_SYSLDT, 0);
	init_seg(&state->segs[NVMM_X64_SEG_TR], SDT_SYS386BSY, 0);

	/* Protected mode and paging enabled. */
	state->crs[NVMM_X64_CR_CR0] = CR0_PG|CR0_PE|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM;

	/* 64bit mode enabled. */
	state->crs[NVMM_X64_CR_CR4] = CR4_PAE;
	state->msrs[NVMM_X64_MSR_EFER] = EFER_LME | EFER_SCE | EFER_LMA;

	/* Stolen from x86/pmap.c */
#define	PATENTRY(n, type)	(type << ((n) * 8))
#define	PAT_UC		0x0ULL
#define	PAT_WC		0x1ULL
#define	PAT_WT		0x4ULL
#define	PAT_WP		0x5ULL
#define	PAT_WB		0x6ULL
#define	PAT_UCMINUS	0x7ULL
	state->msrs[NVMM_X64_MSR_PAT] =
	    PATENTRY(0, PAT_WB) | PATENTRY(1, PAT_WT) |
	    PATENTRY(2, PAT_UCMINUS) | PATENTRY(3, PAT_UC) |
	    PATENTRY(4, PAT_WB) | PATENTRY(5, PAT_WT) |
	    PATENTRY(6, PAT_UCMINUS) | PATENTRY(7, PAT_UC);

	/* Page tables. */
	state->crs[NVMM_X64_CR_CR3] = 0x3000;

	state->gprs[NVMM_X64_GPR_RIP] = 0x2000;

	if (nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_ALL) == -1)
		err(errno, "nvmm_vcpu_setstate");
}

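/*
 * Map the instruction page at GPA 0x2000 and build a minimal 4-level page
 * table hierarchy (GPAs 0x3000-0x6000) that identity-maps the instruction
 * page and the MMIO page. GPA 0x1000 is deliberately left without a host
 * mapping, so guest accesses to it cause NVMM_VCPU_EXIT_MEMORY.
 */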
static void
map_pages64(struct nvmm_machine *mach)
{
	pt_entry_t *L4, *L3, *L2, *L1;
	int ret;

	instbuf = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
	    -1, 0);
	if (instbuf == MAP_FAILED)
		err(errno, "mmap");

	if (nvmm_hva_map(mach, (uintptr_t)instbuf, PAGE_SIZE) == -1)
		err(errno, "nvmm_hva_map");
	ret = nvmm_gpa_map(mach, (uintptr_t)instbuf, 0x2000, PAGE_SIZE,
	    PROT_READ|PROT_EXEC);
	if (ret == -1)
		err(errno, "nvmm_gpa_map");

	L4 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
	    -1, 0);
	if (L4 == MAP_FAILED)
		err(errno, "mmap");
	L3 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
	    -1, 0);
	if (L3 == MAP_FAILED)
		err(errno, "mmap");
	L2 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
	    -1, 0);
	if (L2 == MAP_FAILED)
		err(errno, "mmap");
	L1 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
	    -1, 0);
	if (L1 == MAP_FAILED)
		err(errno, "mmap");

	if (nvmm_hva_map(mach, (uintptr_t)L4, PAGE_SIZE) == -1)
		err(errno, "nvmm_hva_map");
	if (nvmm_hva_map(mach, (uintptr_t)L3, PAGE_SIZE) == -1)
		err(errno, "nvmm_hva_map");
	if (nvmm_hva_map(mach, (uintptr_t)L2, PAGE_SIZE) == -1)
		err(errno, "nvmm_hva_map");
	if (nvmm_hva_map(mach, (uintptr_t)L1, PAGE_SIZE) == -1)
		err(errno, "nvmm_hva_map");

	ret = nvmm_gpa_map(mach, (uintptr_t)L4, 0x3000, PAGE_SIZE,
	    PROT_READ|PROT_WRITE);
	if (ret == -1)
		err(errno, "nvmm_gpa_map");
	ret = nvmm_gpa_map(mach, (uintptr_t)L3, 0x4000, PAGE_SIZE,
	    PROT_READ|PROT_WRITE);
	if (ret == -1)
		err(errno, "nvmm_gpa_map");
	ret = nvmm_gpa_map(mach, (uintptr_t)L2, 0x5000, PAGE_SIZE,
	    PROT_READ|PROT_WRITE);
	if (ret == -1)
		err(errno, "nvmm_gpa_map");
	ret = nvmm_gpa_map(mach, (uintptr_t)L1, 0x6000, PAGE_SIZE,
	    PROT_READ|PROT_WRITE);
	if (ret == -1)
		err(errno, "nvmm_gpa_map");

	memset(L4, 0, PAGE_SIZE);
	memset(L3, 0, PAGE_SIZE);
	memset(L2, 0, PAGE_SIZE);
	memset(L1, 0, PAGE_SIZE);

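	/*
	 * Link L4 -> L3 -> L2 -> L1 and identity-map the instruction page
	 * (0x2000) and the MMIO page (0x1000) in the leaf table.
	 */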
	L4[0] = PTE_P | PTE_W | 0x4000;
	L3[0] = PTE_P | PTE_W | 0x5000;
	L2[0] = PTE_P | PTE_W | 0x6000;
	L1[0x2000 / PAGE_SIZE] = PTE_P | PTE_W | 0x2000;
	L1[0x1000 / PAGE_SIZE] = PTE_P | PTE_W | 0x1000;
}

/*
 * 0x1000: MMIO address, unmapped
 * 0x2000: Instructions, mapped
 * 0x3000: L4
 * 0x4000: L3
 * 0x5000: L2
 * 0x6000: L1
 */
static void
test_vm64(void)
{
	struct nvmm_machine mach;
	struct nvmm_vcpu vcpu;
	size_t i;

	if (nvmm_machine_create(&mach) == -1)
		err(errno, "nvmm_machine_create");
	if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
		err(errno, "nvmm_vcpu_create");
	nvmm_vcpu_configure(&mach, &vcpu, NVMM_VCPU_CONF_CALLBACKS, &callbacks);
	map_pages64(&mach);

	for (i = 0; tests64[i].name != NULL; i++) {
		reset_machine64(&mach, &vcpu);
		run_test(&mach, &vcpu, &tests64[i]);
	}

	if (nvmm_vcpu_destroy(&mach, &vcpu) == -1)
		err(errno, "nvmm_vcpu_destroy");
	if (nvmm_machine_destroy(&mach) == -1)
		err(errno, "nvmm_machine_destroy");
}

/* -------------------------------------------------------------------------- */

extern uint8_t test_16bit_1_begin, test_16bit_1_end;
extern uint8_t test_16bit_2_begin, test_16bit_2_end;
extern uint8_t test_16bit_3_begin, test_16bit_3_end;
extern uint8_t test_16bit_4_begin, test_16bit_4_end;
extern uint8_t test_16bit_5_begin, test_16bit_5_end;
extern uint8_t test_16bit_6_begin, test_16bit_6_end;

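/*
 * The 16-bit snippets store to various addresses inside the MMIO page, so
 * each test carries its own 'off' into mmiobuf.
 */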
static const struct test tests16[] = {
	{ "16bit test1 - MOV single", &test_16bit_1_begin, &test_16bit_1_end,
	  0x023, 0x10f1 - 0x1000 },
	{ "16bit test2 - MOV dual", &test_16bit_2_begin, &test_16bit_2_end,
	  0x123, 0x10f3 - 0x1000 },
	{ "16bit test3 - MOV dual+disp", &test_16bit_3_begin, &test_16bit_3_end,
	  0x678, 0x10f1 - 0x1000 },
	{ "16bit test4 - Mixed", &test_16bit_4_begin, &test_16bit_4_end,
	  0x1011, 0x10f6 - 0x1000 },
	{ "16bit test5 - disp16-only", &test_16bit_5_begin, &test_16bit_5_end,
	  0x12, 0x1234 - 0x1000 },
	{ "16bit test6 - XCHG", &test_16bit_6_begin, &test_16bit_6_end,
	  0x1234, 0x1234 - 0x1000 },
	{ NULL, NULL, NULL, -1, -1 }
};

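/*
 * The vCPU is left in its default reset state (16-bit real mode); only raise
 * the CS limit so the instruction page at 0x2000 is reachable and point RIP
 * at it.
 */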
static void
reset_machine16(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
	struct nvmm_x64_state *state = vcpu->state;

	if (nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_ALL) == -1)
		err(errno, "nvmm_vcpu_getstate");

	state->segs[NVMM_X64_SEG_CS].base = 0;
	state->segs[NVMM_X64_SEG_CS].limit = 0x2FFF;
	state->gprs[NVMM_X64_GPR_RIP] = 0x2000;

	if (nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_ALL) == -1)
		err(errno, "nvmm_vcpu_setstate");
}

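/*
 * Real mode needs no page tables: only map the instruction page at GPA
 * 0x2000. The MMIO page at 0x1000 again has no host mapping, so accesses to
 * it exit to the host.
 */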
static void
map_pages16(struct nvmm_machine *mach)
{
	int ret;

	instbuf = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
	    -1, 0);
	if (instbuf == MAP_FAILED)
		err(errno, "mmap");

	if (nvmm_hva_map(mach, (uintptr_t)instbuf, PAGE_SIZE) == -1)
		err(errno, "nvmm_hva_map");
	ret = nvmm_gpa_map(mach, (uintptr_t)instbuf, 0x2000, PAGE_SIZE,
	    PROT_READ|PROT_EXEC);
	if (ret == -1)
		err(errno, "nvmm_gpa_map");
}

/*
 * 0x1000: MMIO address, unmapped
 * 0x2000: Instructions, mapped
 */
static void
test_vm16(void)
{
	struct nvmm_machine mach;
	struct nvmm_vcpu vcpu;
	size_t i;

	if (nvmm_machine_create(&mach) == -1)
		err(errno, "nvmm_machine_create");
	if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
		err(errno, "nvmm_vcpu_create");
	nvmm_vcpu_configure(&mach, &vcpu, NVMM_VCPU_CONF_CALLBACKS, &callbacks);
	map_pages16(&mach);

	for (i = 0; tests16[i].name != NULL; i++) {
		reset_machine16(&mach, &vcpu);
		run_test(&mach, &vcpu, &tests16[i]);
	}

	if (nvmm_vcpu_destroy(&mach, &vcpu) == -1)
		err(errno, "nvmm_vcpu_destroy");
	if (nvmm_machine_destroy(&mach) == -1)
		err(errno, "nvmm_machine_destroy");
}

/* -------------------------------------------------------------------------- */

int
main(int argc, char *argv[])
{
	if (nvmm_init() == -1)
		err(errno, "nvmm_init");
	test_vm64();
	test_vm16();
	return 0;
}