1 /*
2  * Copyright (c) 2018-2021 Maxime Villard, m00nbsd.net
3  * All rights reserved.
4  *
5  * This code is part of the NVMM hypervisor.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <unistd.h>
34 #include <string.h>
35 #include <err.h>
36 #include <errno.h>
37 #include <sys/types.h>
38 #include <sys/mman.h>
39 #include <machine/segments.h>
40 #include <machine/psl.h>
41 
42 #include <nvmm.h>
43 
44 #ifdef __NetBSD__
45 
46 #include <machine/pte.h>
47 #define PAGE_SIZE 4096
48 
49 #else /* DragonFly */
50 
51 #include <machine/pmap.h>
52 #define PTE_P		X86_PG_V	/* 0x001: P (Valid) */
53 #define PTE_W		X86_PG_RW	/* 0x002: R/W (Read/Write) */
54 #define PSL_MBO		PSL_RESERVED_DEFAULT	/* 0x00000002 */
55 #define SDT_SYS386BSY	SDT_SYSBSY	/* 11: system 64-bit TSS busy */
56 
57 #endif /* __NetBSD__ */
58 
59 #define IO_SIZE	128
60 
61 static char iobuf[IO_SIZE];
62 
63 static char *databuf;
64 static uint8_t *instbuf;
65 
66 static void
67 init_seg(struct nvmm_x64_state_seg *seg, int type, int sel)
68 {
69 	seg->selector = sel;
70 	seg->attrib.type = type;
71 	seg->attrib.s = (type & 0b10000) != 0;
72 	seg->attrib.dpl = 0;
73 	seg->attrib.p = 1;
74 	seg->attrib.avl = 1;
75 	seg->attrib.l = 1;
76 	seg->attrib.def = 0;
77 	seg->attrib.g = 1;
78 	seg->limit = 0x0000FFFF;
79 	seg->base = 0x00000000;
80 }
81 
/*
 * Reset the vCPU to a known 64-bit long-mode state: flat kernel
 * segments, paging enabled with the page tables map_pages() builds at
 * GPA 0x3000, and RIP at the instruction page (GPA 0x2000). Pushes the
 * whole state to the kernel via nvmm_vcpu_setstate().
 */
static void
reset_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
	struct nvmm_x64_state *state = vcpu->state;

	memset(state, 0, sizeof(*state));

	/* Default. */
	state->gprs[NVMM_X64_GPR_RFLAGS] = PSL_MBO;
	init_seg(&state->segs[NVMM_X64_SEG_CS], SDT_MEMERA, GSEL(GCODE_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_SS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_DS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_ES], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_FS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
	init_seg(&state->segs[NVMM_X64_SEG_GS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));

	/* Blank. */
	init_seg(&state->segs[NVMM_X64_SEG_GDT], 0, 0);
	init_seg(&state->segs[NVMM_X64_SEG_IDT], 0, 0);
	init_seg(&state->segs[NVMM_X64_SEG_LDT], SDT_SYSLDT, 0);
	init_seg(&state->segs[NVMM_X64_SEG_TR], SDT_SYS386BSY, 0);

	/* Protected mode enabled. */
	state->crs[NVMM_X64_CR_CR0] = CR0_PG|CR0_PE|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM;

	/* 64bit mode enabled. */
	state->crs[NVMM_X64_CR_CR4] = CR4_PAE;
	state->msrs[NVMM_X64_MSR_EFER] = EFER_LME | EFER_SCE | EFER_LMA;

	/* Stolen from x86/pmap.c */
#define	PATENTRY(n, type)	(type << ((n) * 8))
#define	PAT_UC		0x0ULL
#define	PAT_WC		0x1ULL
#define	PAT_WT		0x4ULL
#define	PAT_WP		0x5ULL
#define	PAT_WB		0x6ULL
#define	PAT_UCMINUS	0x7ULL
	state->msrs[NVMM_X64_MSR_PAT] =
	    PATENTRY(0, PAT_WB) | PATENTRY(1, PAT_WT) |
	    PATENTRY(2, PAT_UCMINUS) | PATENTRY(3, PAT_UC) |
	    PATENTRY(4, PAT_WB) | PATENTRY(5, PAT_WT) |
	    PATENTRY(6, PAT_UCMINUS) | PATENTRY(7, PAT_UC);

	/* Page tables. */
	state->crs[NVMM_X64_CR_CR3] = 0x3000;

	state->gprs[NVMM_X64_GPR_RIP] = 0x2000;

	if (nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_ALL) == -1)
		err(errno, "nvmm_vcpu_setstate");
}
133 
134 static void
135 map_pages(struct nvmm_machine *mach)
136 {
137 	pt_entry_t *L4, *L3, *L2, *L1;
138 	int ret;
139 
140 	instbuf = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
141 	    -1, 0);
142 	if (instbuf == MAP_FAILED)
143 		err(errno, "mmap");
144 	databuf = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
145 	    -1, 0);
146 	if (databuf == MAP_FAILED)
147 		err(errno, "mmap");
148 
149 	if (nvmm_hva_map(mach, (uintptr_t)instbuf, PAGE_SIZE) == -1)
150 		err(errno, "nvmm_hva_map");
151 	if (nvmm_hva_map(mach, (uintptr_t)databuf, PAGE_SIZE) == -1)
152 		err(errno, "nvmm_hva_map");
153 	ret = nvmm_gpa_map(mach, (uintptr_t)instbuf, 0x2000, PAGE_SIZE,
154 	    PROT_READ|PROT_EXEC);
155 	if (ret == -1)
156 		err(errno, "nvmm_gpa_map");
157 	ret = nvmm_gpa_map(mach, (uintptr_t)databuf, 0x1000, PAGE_SIZE,
158 	    PROT_READ|PROT_WRITE);
159 	if (ret == -1)
160 		err(errno, "nvmm_gpa_map");
161 
162 	L4 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
163 	    -1, 0);
164 	if (L4 == MAP_FAILED)
165 		err(errno, "mmap");
166 	L3 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
167 	    -1, 0);
168 	if (L3 == MAP_FAILED)
169 		err(errno, "mmap");
170 	L2 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
171 	    -1, 0);
172 	if (L2 == MAP_FAILED)
173 		err(errno, "mmap");
174 	L1 = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
175 	    -1, 0);
176 	if (L1 == MAP_FAILED)
177 		err(errno, "mmap");
178 
179 	if (nvmm_hva_map(mach, (uintptr_t)L4, PAGE_SIZE) == -1)
180 		err(errno, "nvmm_hva_map");
181 	if (nvmm_hva_map(mach, (uintptr_t)L3, PAGE_SIZE) == -1)
182 		err(errno, "nvmm_hva_map");
183 	if (nvmm_hva_map(mach, (uintptr_t)L2, PAGE_SIZE) == -1)
184 		err(errno, "nvmm_hva_map");
185 	if (nvmm_hva_map(mach, (uintptr_t)L1, PAGE_SIZE) == -1)
186 		err(errno, "nvmm_hva_map");
187 
188 	ret = nvmm_gpa_map(mach, (uintptr_t)L4, 0x3000, PAGE_SIZE,
189 	    PROT_READ|PROT_WRITE);
190 	if (ret == -1)
191 		err(errno, "nvmm_gpa_map");
192 	ret = nvmm_gpa_map(mach, (uintptr_t)L3, 0x4000, PAGE_SIZE,
193 	    PROT_READ|PROT_WRITE);
194 	if (ret == -1)
195 		err(errno, "nvmm_gpa_map");
196 	ret = nvmm_gpa_map(mach, (uintptr_t)L2, 0x5000, PAGE_SIZE,
197 	    PROT_READ|PROT_WRITE);
198 	if (ret == -1)
199 		err(errno, "nvmm_gpa_map");
200 	ret = nvmm_gpa_map(mach, (uintptr_t)L1, 0x6000, PAGE_SIZE,
201 	    PROT_READ|PROT_WRITE);
202 	if (ret == -1)
203 		err(errno, "nvmm_gpa_map");
204 
205 	memset(L4, 0, PAGE_SIZE);
206 	memset(L3, 0, PAGE_SIZE);
207 	memset(L2, 0, PAGE_SIZE);
208 	memset(L1, 0, PAGE_SIZE);
209 
210 	L4[0] = PTE_P | PTE_W | 0x4000;
211 	L3[0] = PTE_P | PTE_W | 0x5000;
212 	L2[0] = PTE_P | PTE_W | 0x6000;
213 	L1[0x2000 / PAGE_SIZE] = PTE_P | PTE_W | 0x2000;
214 	L1[0x1000 / PAGE_SIZE] = PTE_P | PTE_W | 0x1000;
215 }
216 
217 /* -------------------------------------------------------------------------- */
218 
219 static size_t iobuf_off = 0;
220 
221 static void
222 io_callback(struct nvmm_io *io)
223 {
224 	if (io->port != 123) {
225 		printf("Wrong port\n");
226 		exit(-1);
227 	}
228 
229 	if (io->in) {
230 		memcpy(io->data, iobuf + iobuf_off, io->size);
231 	} else {
232 		memcpy(iobuf + iobuf_off, io->data, io->size);
233 	}
234 	iobuf_off += io->size;
235 
236 }
237 
/*
 * Handle an I/O exit by letting libnvmm decode and emulate the
 * instruction; it invokes io_callback() for the actual data transfer.
 */
static int
handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
	if (nvmm_assist_io(mach, vcpu) == -1)
		err(errno, "nvmm_assist_io");

	return 0;
}
250 
251 static void
252 run_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
253 {
254 	struct nvmm_vcpu_exit *exit = vcpu->exit;
255 
256 	while (1) {
257 		if (nvmm_vcpu_run(mach, vcpu) == -1)
258 			err(errno, "nvmm_vcpu_run");
259 
260 		switch (exit->reason) {
261 		case NVMM_VCPU_EXIT_NONE:
262 			break;
263 
264 		case NVMM_VCPU_EXIT_RDMSR:
265 			/* Stop here. */
266 			return;
267 
268 		case NVMM_VCPU_EXIT_IO:
269 			handle_io(mach, vcpu);
270 			break;
271 
272 		case NVMM_VCPU_EXIT_SHUTDOWN:
273 			printf("Shutting down!\n");
274 			return;
275 
276 		default:
277 			printf("Invalid!\n");
278 			return;
279 		}
280 	}
281 }
282 
283 /* -------------------------------------------------------------------------- */
284 
/* One I/O test case: a guest code snippet and its expected result. */
struct test {
	const char *name;	/* human-readable test name */
	uint8_t *code_begin;	/* start of guest code (asm blob) */
	uint8_t *code_end;	/* one past the end of guest code */
	const char *wanted;	/* expected string transferred over port 123 */
	bool in;		/* true: IN test (iobuf -> guest -> databuf) */
};
292 
293 static void
294 run_test(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
295     const struct test *test)
296 {
297 	size_t size;
298 	char *res;
299 
300 	size = (size_t)test->code_end - (size_t)test->code_begin;
301 
302 	reset_machine(mach, vcpu);
303 
304 	iobuf_off = 0;
305 	memset(iobuf, 0, IO_SIZE);
306 	memset(databuf, 0, PAGE_SIZE);
307 	memcpy(instbuf, test->code_begin, size);
308 
309 	if (test->in) {
310 		strcpy(iobuf, test->wanted);
311 	} else {
312 		strcpy(databuf, test->wanted);
313 	}
314 
315 	run_machine(mach, vcpu);
316 
317 	if (test->in) {
318 		res = databuf;
319 	} else {
320 		res = iobuf;
321 	}
322 
323 	if (!strcmp(res, test->wanted)) {
324 		printf("Test '%s' passed\n", test->name);
325 	} else {
326 		printf("Test '%s' failed, wanted '%s', got '%s'\n", test->name,
327 		    test->wanted, res);
328 		errx(-1, "run_test failed");
329 	}
330 }
331 
332 /* -------------------------------------------------------------------------- */
333 
334 extern uint8_t test1_begin, test1_end;
335 extern uint8_t test2_begin, test2_end;
336 extern uint8_t test3_begin, test3_end;
337 extern uint8_t test4_begin, test4_end;
338 extern uint8_t test5_begin, test5_end;
339 extern uint8_t test6_begin, test6_end;
340 extern uint8_t test7_begin, test7_end;
341 extern uint8_t test8_begin, test8_end;
342 extern uint8_t test9_begin, test9_end;
343 extern uint8_t test10_begin, test10_end;
344 extern uint8_t test11_begin, test11_end;
345 extern uint8_t test12_begin, test12_end;
346 
/*
 * Test table: one entry per IN/OUT instruction variant, covering byte,
 * word, and dword widths plus the string (INS/OUTS) forms with a REP
 * prefix. Terminated by a NULL name.
 */
static const struct test tests[] = {
	{ "test1 - INB", &test1_begin, &test1_end, "12", true },
	{ "test2 - INW", &test2_begin, &test2_end, "1234", true },
	{ "test3 - INL", &test3_begin, &test3_end, "12345678", true },
	{ "test4 - INSB+REP", &test4_begin, &test4_end, "12345", true },
	{ "test5 - INSW+REP", &test5_begin, &test5_end,
	  "Comment est votre blanquette", true },
	{ "test6 - INSL+REP", &test6_begin, &test6_end,
	  "123456789abcdefghijklmnopqrs", true },
	{ "test7 - OUTB", &test7_begin, &test7_end, "12", false },
	{ "test8 - OUTW", &test8_begin, &test8_end, "1234", false },
	{ "test9 - OUTL", &test9_begin, &test9_end, "12345678", false },
	{ "test10 - OUTSB+REP", &test10_begin, &test10_end, "12345", false },
	{ "test11 - OUTSW+REP", &test11_begin, &test11_end,
	  "Ah, Herr Bramard", false },
	{ "test12 - OUTSL+REP", &test12_begin, &test12_end,
	  "123456789abcdefghijklmnopqrs", false },
	{ NULL, NULL, NULL, NULL, false }
};
366 
/* Assist callbacks registered on the vCPU; only port I/O is emulated. */
static struct nvmm_assist_callbacks callbacks = {
	.io = io_callback,
	.mem = NULL
};
371 
372 /*
373  * 0x1000: Data, mapped
374  * 0x2000: Instructions, mapped
375  * 0x3000: L4
376  * 0x4000: L3
377  * 0x5000: L2
378  * 0x6000: L1
379  */
380 int main(int argc, char *argv[])
381 {
382 	struct nvmm_machine mach;
383 	struct nvmm_vcpu vcpu;
384 	size_t i;
385 
386 	if (nvmm_init() == -1)
387 		err(errno, "nvmm_init");
388 	if (nvmm_machine_create(&mach) == -1)
389 		err(errno, "nvmm_machine_create");
390 	if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
391 		err(errno, "nvmm_vcpu_create");
392 	nvmm_vcpu_configure(&mach, &vcpu, NVMM_VCPU_CONF_CALLBACKS, &callbacks);
393 	map_pages(&mach);
394 
395 	for (i = 0; tests[i].name != NULL; i++) {
396 		run_test(&mach, &vcpu, &tests[i]);
397 	}
398 
399 	return 0;
400 }
401